// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/* Convert a virtchnl field enum into a single-bit field selector by masking
 * off the protocol-header portion of the encoded value.
 */
#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)

/* Maps one virtchnl protocol header type onto the equivalent ice flow
 * segment header bit(s).
 */
struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};

/* Translation table from virtchnl protocol headers to ice flow segment
 * headers. Entries with multiple ice bits OR them together (e.g. IPv4/IPv6
 * also select IPV_OTHER).
 */
static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};

/* Maps one (virtchnl header, virtchnl field-selector set) pair onto the
 * equivalent ice hash-field bitmask.
 */
struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

/* Translation table from virtchnl hash-field selections to ice hash fields.
 * Lookup requires an exact match of both vc_hdr and the full vc_hash_field
 * combination, so common multi-field combinations are enumerated explicitly.
 */
static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}
1860deb0bf7SJacob Keller */ 1870deb0bf7SJacob Keller lockdep_assert_held(&vfs->table_lock); 1880deb0bf7SJacob Keller 1890deb0bf7SJacob Keller hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { 1900deb0bf7SJacob Keller hash_del_rcu(&vf->entry); 1910deb0bf7SJacob Keller ice_put_vf(vf); 1920deb0bf7SJacob Keller } 1930deb0bf7SJacob Keller } 1940deb0bf7SJacob Keller 1950deb0bf7SJacob Keller /** 1960deb0bf7SJacob Keller * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF 1970deb0bf7SJacob Keller * @pf: pointer to the PF structure 1980deb0bf7SJacob Keller * @v_opcode: operation code 1990deb0bf7SJacob Keller * @v_retval: return value 2000deb0bf7SJacob Keller * @msg: pointer to the msg buffer 2010deb0bf7SJacob Keller * @msglen: msg length 2020deb0bf7SJacob Keller */ 2030deb0bf7SJacob Keller static void 2040deb0bf7SJacob Keller ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, 2050deb0bf7SJacob Keller enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) 2060deb0bf7SJacob Keller { 2070deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 2080deb0bf7SJacob Keller struct ice_vf *vf; 2090deb0bf7SJacob Keller unsigned int bkt; 2100deb0bf7SJacob Keller 2110deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 2120deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 2130deb0bf7SJacob Keller /* Not all vfs are enabled so skip the ones that are not */ 2140deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && 2150deb0bf7SJacob Keller !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 2160deb0bf7SJacob Keller continue; 2170deb0bf7SJacob Keller 2180deb0bf7SJacob Keller /* Ignore return value on purpose - a given VF may fail, but 2190deb0bf7SJacob Keller * we need to keep going and send to all of them 2200deb0bf7SJacob Keller */ 2210deb0bf7SJacob Keller ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, 2220deb0bf7SJacob Keller msglen, NULL); 2230deb0bf7SJacob Keller } 2240deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 
2250deb0bf7SJacob Keller } 2260deb0bf7SJacob Keller 2270deb0bf7SJacob Keller /** 2280deb0bf7SJacob Keller * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event 2290deb0bf7SJacob Keller * @vf: pointer to the VF structure 2300deb0bf7SJacob Keller * @pfe: pointer to the virtchnl_pf_event to set link speed/status for 2310deb0bf7SJacob Keller * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_* 2320deb0bf7SJacob Keller * @link_up: whether or not to set the link up/down 2330deb0bf7SJacob Keller */ 2340deb0bf7SJacob Keller static void 2350deb0bf7SJacob Keller ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, 2360deb0bf7SJacob Keller int ice_link_speed, bool link_up) 2370deb0bf7SJacob Keller { 2380deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { 2390deb0bf7SJacob Keller pfe->event_data.link_event_adv.link_status = link_up; 2400deb0bf7SJacob Keller /* Speed in Mbps */ 2410deb0bf7SJacob Keller pfe->event_data.link_event_adv.link_speed = 2420deb0bf7SJacob Keller ice_conv_link_speed_to_virtchnl(true, ice_link_speed); 2430deb0bf7SJacob Keller } else { 2440deb0bf7SJacob Keller pfe->event_data.link_event.link_status = link_up; 2450deb0bf7SJacob Keller /* Legacy method for virtchnl link speeds */ 2460deb0bf7SJacob Keller pfe->event_data.link_event.link_speed = 2470deb0bf7SJacob Keller (enum virtchnl_link_speed) 2480deb0bf7SJacob Keller ice_conv_link_speed_to_virtchnl(false, ice_link_speed); 2490deb0bf7SJacob Keller } 2500deb0bf7SJacob Keller } 2510deb0bf7SJacob Keller 2520deb0bf7SJacob Keller /** 2530deb0bf7SJacob Keller * ice_vc_notify_vf_link_state - Inform a VF of link status 2540deb0bf7SJacob Keller * @vf: pointer to the VF structure 2550deb0bf7SJacob Keller * 2560deb0bf7SJacob Keller * send a link status message to a single VF 2570deb0bf7SJacob Keller */ 2580deb0bf7SJacob Keller void ice_vc_notify_vf_link_state(struct ice_vf *vf) 2590deb0bf7SJacob Keller { 2600deb0bf7SJacob Keller struct 
/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 *
 * Release the VSI back to the PF, then clear the VF's cached VSI indices so
 * later code cannot accidentally use the stale handle.
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(ice_get_vf_vsi(vf));
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 *
 * Tears down the VF's FDIR context, control VSI, LAN VSI and MDD event
 * bookkeeping, then disables the VF's interrupt vectors so the VF restarts
 * from a known state. INIT state is cleared first to fence off the
 * configuration API while resources are being freed.
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	/* last vector in this VF's contiguous per-VF MSIX range */
	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
/**
 * ice_dis_vf_mappings - disable a VF's interrupt and queue mappings
 * @vf: pointer to the VF structure
 *
 * Clears the VF's MSIX allocation registers, reclaims each of the VF's
 * interrupt vectors for the PF via GLINT_VECT2FUNC, and zeroes the Tx/Rx
 * queue base registers (contiguous mapping mode only - scattered mode is
 * not implemented).
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		/* point the vector back at the PF's function */
		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
3350deb0bf7SJacob Keller struct device *dev; 3360deb0bf7SJacob Keller int first, last, v; 3370deb0bf7SJacob Keller struct ice_hw *hw; 3380deb0bf7SJacob Keller 3390deb0bf7SJacob Keller hw = &pf->hw; 3400deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 3410deb0bf7SJacob Keller 3420deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 3430deb0bf7SJacob Keller wr32(hw, VPINT_ALLOC(vf->vf_id), 0); 3440deb0bf7SJacob Keller wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); 3450deb0bf7SJacob Keller 3460deb0bf7SJacob Keller first = vf->first_vector_idx; 3470deb0bf7SJacob Keller last = first + pf->vfs.num_msix_per - 1; 3480deb0bf7SJacob Keller for (v = first; v <= last; v++) { 3490deb0bf7SJacob Keller u32 reg; 3500deb0bf7SJacob Keller 3510deb0bf7SJacob Keller reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & 3520deb0bf7SJacob Keller GLINT_VECT2FUNC_IS_PF_M) | 3530deb0bf7SJacob Keller ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & 3540deb0bf7SJacob Keller GLINT_VECT2FUNC_PF_NUM_M)); 3550deb0bf7SJacob Keller wr32(hw, GLINT_VECT2FUNC(v), reg); 3560deb0bf7SJacob Keller } 3570deb0bf7SJacob Keller 3580deb0bf7SJacob Keller if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) 3590deb0bf7SJacob Keller wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); 3600deb0bf7SJacob Keller else 3610deb0bf7SJacob Keller dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); 3620deb0bf7SJacob Keller 3630deb0bf7SJacob Keller if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) 3640deb0bf7SJacob Keller wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); 3650deb0bf7SJacob Keller else 3660deb0bf7SJacob Keller dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); 3670deb0bf7SJacob Keller } 3680deb0bf7SJacob Keller 3690deb0bf7SJacob Keller /** 3700deb0bf7SJacob Keller * ice_sriov_free_msix_res - Reset/free any used MSIX resources 3710deb0bf7SJacob Keller * @pf: pointer to the PF structure 3720deb0bf7SJacob Keller * 3730deb0bf7SJacob Keller * Since no MSIX entries are taken from the pf->irq_tracker then just clear 3740deb0bf7SJacob 
/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 *
 * Disables SR-IOV (unless VFs are still assigned to guests), then walks
 * every VF under the table lock: stops its queues, tears down mappings and
 * resources, acks any pending VFLR, and clears mailbox-malicious tracking.
 * Finally releases the MSIX range and drops all VF table entries.
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	/* serialize against any other VF disable in progress */
	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			/* ack/clear this VF's bit in GLGEN_VFLRSTAT */
			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	/* register indices below are in the device-absolute VF space */
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr) {
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
	}

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	/* poll PCI config space until the VF has no pending transactions */
	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}
if ((reg & VF_TRANS_PENDING_M) == 0) 5300deb0bf7SJacob Keller break; 5310deb0bf7SJacob Keller 5320deb0bf7SJacob Keller dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); 5330deb0bf7SJacob Keller udelay(ICE_PCI_CIAD_WAIT_DELAY_US); 5340deb0bf7SJacob Keller } 5350deb0bf7SJacob Keller } 5360deb0bf7SJacob Keller 5370deb0bf7SJacob Keller /** 5380deb0bf7SJacob Keller * ice_vf_vsi_setup - Set up a VF VSI 5390deb0bf7SJacob Keller * @vf: VF to setup VSI for 5400deb0bf7SJacob Keller * 5410deb0bf7SJacob Keller * Returns pointer to the successfully allocated VSI struct on success, 5420deb0bf7SJacob Keller * otherwise returns NULL on failure. 5430deb0bf7SJacob Keller */ 5440deb0bf7SJacob Keller static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) 5450deb0bf7SJacob Keller { 5460deb0bf7SJacob Keller struct ice_port_info *pi = ice_vf_get_port_info(vf); 5470deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 5480deb0bf7SJacob Keller struct ice_vsi *vsi; 5490deb0bf7SJacob Keller 5500deb0bf7SJacob Keller vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); 5510deb0bf7SJacob Keller 5520deb0bf7SJacob Keller if (!vsi) { 5530deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); 5540deb0bf7SJacob Keller ice_vf_invalidate_vsi(vf); 5550deb0bf7SJacob Keller return NULL; 5560deb0bf7SJacob Keller } 5570deb0bf7SJacob Keller 5580deb0bf7SJacob Keller vf->lan_vsi_idx = vsi->idx; 5590deb0bf7SJacob Keller vf->lan_vsi_num = vsi->vsi_num; 5600deb0bf7SJacob Keller 5610deb0bf7SJacob Keller return vsi; 5620deb0bf7SJacob Keller } 5630deb0bf7SJacob Keller 5640deb0bf7SJacob Keller /** 5650deb0bf7SJacob Keller * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space 5660deb0bf7SJacob Keller * @pf: pointer to PF structure 5670deb0bf7SJacob Keller * @vf: pointer to VF that the first MSIX vector index is being calculated for 5680deb0bf7SJacob Keller * 5690deb0bf7SJacob Keller * This returns the first MSIX vector index in PF space that is used by this VF. 
/**
 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 * @vf: VF to re-apply the configuration for
 *
 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
 * needs to re-apply the host configured Tx rate limiting configuration.
 *
 * Returns 0 on success, or a negative error code from the bandwidth-limit
 * helpers on failure.
 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	/* a rate of 0 means no limit was configured; skip */
	if (vf->min_tx_rate) {
		/* rates are stored in Mbps; helpers expect Kbps */
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* program the port VLAN into the VSI parameters first, then
		 * add the matching VLAN filter
		 */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	/* common error path for either the port VLAN or VLAN 0 filter add */
	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* Rx VLAN filtering failure is only warned about, not fatal */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	/* in switchdev mode MAC filters are managed elsewhere; nothing to do */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	/* the broadcast filter counts against the VF's MAC filter total */
	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr.addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* keep the device-visible address in sync with the hardware
		 * (perm_addr/LAA) address that was just re-added
		 */
		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	/* translate PF-relative vector indices into device-global ones for
	 * the VPINT_ALLOC* registers
	 */
	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
	       q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 *
 * Returns the highest valid index, 0 when no entry is valid, or -EINVAL when
 * @res is NULL.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	/* scan backwards so the first hit is the maximum valid index */
	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	/* carve the SR-IOV vectors out of the tail of the HW vector space */
	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated.
 * Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* caller must hold the VF table lock */
	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	if (max_valid_res_idx < 0)
		return -ENOSPC;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	/* pick the largest per-VF vector tier that fits */
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	/* queue count is limited by vectors (minus the non-queue vectors)
	 * and by the Tx queues actually available, rounded down to a power
	 * of two when constrained
	 */
	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}

/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enabled hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* clear the software reset bit so the VF can run again */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/* reset the VF's filter/event counters during a VF reset */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

10170deb0bf7SJacob Keller vf->num_mac = 0; 10180deb0bf7SJacob Keller vsi->num_vlan = 0; 10190deb0bf7SJacob Keller memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 10200deb0bf7SJacob Keller memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 10210deb0bf7SJacob Keller } 10220deb0bf7SJacob Keller 10230deb0bf7SJacob Keller /** 10240deb0bf7SJacob Keller * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild 10250deb0bf7SJacob Keller * @vf: VF to perform pre VSI rebuild tasks 10260deb0bf7SJacob Keller * 10270deb0bf7SJacob Keller * These tasks are items that don't need to be amortized since they are most 10280deb0bf7SJacob Keller * likely called in a for loop with all VF(s) in the reset_all_vfs() case. 10290deb0bf7SJacob Keller */ 10300deb0bf7SJacob Keller static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) 10310deb0bf7SJacob Keller { 10320deb0bf7SJacob Keller ice_vf_clear_counters(vf); 10330deb0bf7SJacob Keller ice_clear_vf_reset_trigger(vf); 10340deb0bf7SJacob Keller } 10350deb0bf7SJacob Keller 10360deb0bf7SJacob Keller /** 10370deb0bf7SJacob Keller * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config 10380deb0bf7SJacob Keller * @vsi: Pointer to VSI 10390deb0bf7SJacob Keller * 10400deb0bf7SJacob Keller * This function moves VSI into corresponding scheduler aggregator node 10410deb0bf7SJacob Keller * based on cached value of "aggregator node info" per VSI 10420deb0bf7SJacob Keller */ 10430deb0bf7SJacob Keller static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) 10440deb0bf7SJacob Keller { 10450deb0bf7SJacob Keller struct ice_pf *pf = vsi->back; 10460deb0bf7SJacob Keller struct device *dev; 10470deb0bf7SJacob Keller int status; 10480deb0bf7SJacob Keller 10490deb0bf7SJacob Keller if (!vsi->agg_node) 10500deb0bf7SJacob Keller return; 10510deb0bf7SJacob Keller 10520deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 10530deb0bf7SJacob Keller if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 10540deb0bf7SJacob 
Keller dev_dbg(dev, 10550deb0bf7SJacob Keller "agg_id %u already has reached max_num_vsis %u\n", 10560deb0bf7SJacob Keller vsi->agg_node->agg_id, vsi->agg_node->num_vsis); 10570deb0bf7SJacob Keller return; 10580deb0bf7SJacob Keller } 10590deb0bf7SJacob Keller 10600deb0bf7SJacob Keller status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, 10610deb0bf7SJacob Keller vsi->idx, vsi->tc_cfg.ena_tc); 10620deb0bf7SJacob Keller if (status) 10630deb0bf7SJacob Keller dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", 10640deb0bf7SJacob Keller vsi->idx, vsi->agg_node->agg_id); 10650deb0bf7SJacob Keller else 10660deb0bf7SJacob Keller vsi->agg_node->num_vsis++; 10670deb0bf7SJacob Keller } 10680deb0bf7SJacob Keller 10690deb0bf7SJacob Keller /** 10700deb0bf7SJacob Keller * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset 10710deb0bf7SJacob Keller * @vf: VF to rebuild host configuration on 10720deb0bf7SJacob Keller */ 10730deb0bf7SJacob Keller static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) 10740deb0bf7SJacob Keller { 10750deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 10760deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 10770deb0bf7SJacob Keller 10780deb0bf7SJacob Keller ice_vf_set_host_trust_cfg(vf); 10790deb0bf7SJacob Keller 10800deb0bf7SJacob Keller if (ice_vf_rebuild_host_mac_cfg(vf)) 10810deb0bf7SJacob Keller dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", 10820deb0bf7SJacob Keller vf->vf_id); 10830deb0bf7SJacob Keller 10840deb0bf7SJacob Keller if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) 10850deb0bf7SJacob Keller dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", 10860deb0bf7SJacob Keller vf->vf_id); 10870deb0bf7SJacob Keller 10880deb0bf7SJacob Keller if (ice_vf_rebuild_host_tx_rate_cfg(vf)) 10890deb0bf7SJacob Keller dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", 10900deb0bf7SJacob Keller vf->vf_id); 
10910deb0bf7SJacob Keller 1092a8ea6d86SJacob Keller if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) 10930deb0bf7SJacob Keller dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", 10940deb0bf7SJacob Keller vf->vf_id); 10950deb0bf7SJacob Keller 10960deb0bf7SJacob Keller /* rebuild aggregator node config for main VF VSI */ 10970deb0bf7SJacob Keller ice_vf_rebuild_aggregator_node_cfg(vsi); 10980deb0bf7SJacob Keller } 10990deb0bf7SJacob Keller 11000deb0bf7SJacob Keller /** 11010deb0bf7SJacob Keller * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI 11020deb0bf7SJacob Keller * @vf: VF to release and setup the VSI for 11030deb0bf7SJacob Keller * 11040deb0bf7SJacob Keller * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF 11050deb0bf7SJacob Keller * configuration change, etc.). 11060deb0bf7SJacob Keller */ 11070deb0bf7SJacob Keller static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf) 11080deb0bf7SJacob Keller { 11090deb0bf7SJacob Keller ice_vf_vsi_release(vf); 11100deb0bf7SJacob Keller if (!ice_vf_vsi_setup(vf)) 11110deb0bf7SJacob Keller return -ENOMEM; 11120deb0bf7SJacob Keller 11130deb0bf7SJacob Keller return 0; 11140deb0bf7SJacob Keller } 11150deb0bf7SJacob Keller 11160deb0bf7SJacob Keller /** 11170deb0bf7SJacob Keller * ice_vf_rebuild_vsi - rebuild the VF's VSI 11180deb0bf7SJacob Keller * @vf: VF to rebuild the VSI for 11190deb0bf7SJacob Keller * 11200deb0bf7SJacob Keller * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the 11210deb0bf7SJacob Keller * host, PFR, CORER, etc.). 
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	if (ice_vsi_rebuild(vsi, true)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}

/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
 * @vf: VF to perform tasks on
 *
 * Re-applies host configuration, marks the VF initialized, re-enables the
 * MSIX/queue mappings and finally tells the VF (via VFGEN_RSTAT) that it is
 * active again.
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;

	ice_vf_rebuild_host_cfg(vf);

	ice_vf_set_initialized(vf);
	ice_ena_vf_mappings(vf);
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return false;

	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return false;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, is_vflr, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		bool done = false;
		unsigned int i;
		u32 reg;

		for (i = 0; i < 10; i++) {
			reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
			if (reg & VPGEN_VFRSTAT_VFRD_M) {
				done = true;
				break;
			}

			/* only delay if check failed */
			usleep_range(10, 20);
		}

		if (!done) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	/* eswitch rebuild failure is non-fatal; just warn */
	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);

	return true;
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
12710deb0bf7SJacob Keller * @is_vflr: true if VFLR was issued, false if not 12720deb0bf7SJacob Keller * 12730deb0bf7SJacob Keller * Returns true if the VF is currently in reset, resets successfully, or resets 12740deb0bf7SJacob Keller * are disabled and false otherwise. 12750deb0bf7SJacob Keller */ 12760deb0bf7SJacob Keller bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) 12770deb0bf7SJacob Keller { 12780deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 12790deb0bf7SJacob Keller struct ice_vsi *vsi; 12800deb0bf7SJacob Keller struct device *dev; 12810deb0bf7SJacob Keller struct ice_hw *hw; 12820deb0bf7SJacob Keller bool rsd = false; 12830deb0bf7SJacob Keller u8 promisc_m; 12840deb0bf7SJacob Keller u32 reg; 12850deb0bf7SJacob Keller int i; 12860deb0bf7SJacob Keller 12870deb0bf7SJacob Keller lockdep_assert_held(&vf->cfg_lock); 12880deb0bf7SJacob Keller 12890deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 12900deb0bf7SJacob Keller 12910deb0bf7SJacob Keller if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 12920deb0bf7SJacob Keller dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", 12930deb0bf7SJacob Keller vf->vf_id); 12940deb0bf7SJacob Keller return true; 12950deb0bf7SJacob Keller } 12960deb0bf7SJacob Keller 12970deb0bf7SJacob Keller if (ice_is_vf_disabled(vf)) { 12980deb0bf7SJacob Keller dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", 12990deb0bf7SJacob Keller vf->vf_id); 13000deb0bf7SJacob Keller return true; 13010deb0bf7SJacob Keller } 13020deb0bf7SJacob Keller 13030deb0bf7SJacob Keller /* Set VF disable bit state here, before triggering reset */ 13040deb0bf7SJacob Keller set_bit(ICE_VF_STATE_DIS, vf->vf_states); 13050deb0bf7SJacob Keller ice_trigger_vf_reset(vf, is_vflr, false); 13060deb0bf7SJacob Keller 13070deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 13080deb0bf7SJacob Keller 13090deb0bf7SJacob Keller ice_dis_vf_qs(vf); 13100deb0bf7SJacob Keller 13110deb0bf7SJacob Keller /* Call 
Disable LAN Tx queue AQ whether or not queues are 13120deb0bf7SJacob Keller * enabled. This is needed for successful completion of VFR. 13130deb0bf7SJacob Keller */ 13140deb0bf7SJacob Keller ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, 13150deb0bf7SJacob Keller NULL, ICE_VF_RESET, vf->vf_id, NULL); 13160deb0bf7SJacob Keller 13170deb0bf7SJacob Keller hw = &pf->hw; 13180deb0bf7SJacob Keller /* poll VPGEN_VFRSTAT reg to make sure 13190deb0bf7SJacob Keller * that reset is complete 13200deb0bf7SJacob Keller */ 13210deb0bf7SJacob Keller for (i = 0; i < 10; i++) { 13220deb0bf7SJacob Keller /* VF reset requires driver to first reset the VF and then 13230deb0bf7SJacob Keller * poll the status register to make sure that the reset 13240deb0bf7SJacob Keller * completed successfully. 13250deb0bf7SJacob Keller */ 13260deb0bf7SJacob Keller reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); 13270deb0bf7SJacob Keller if (reg & VPGEN_VFRSTAT_VFRD_M) { 13280deb0bf7SJacob Keller rsd = true; 13290deb0bf7SJacob Keller break; 13300deb0bf7SJacob Keller } 13310deb0bf7SJacob Keller 13320deb0bf7SJacob Keller /* only sleep if the reset is not done */ 13330deb0bf7SJacob Keller usleep_range(10, 20); 13340deb0bf7SJacob Keller } 13350deb0bf7SJacob Keller 13360deb0bf7SJacob Keller vf->driver_caps = 0; 13370deb0bf7SJacob Keller ice_vc_set_default_allowlist(vf); 13380deb0bf7SJacob Keller 13390deb0bf7SJacob Keller /* Display a warning if VF didn't manage to reset in time, but need to 13400deb0bf7SJacob Keller * continue on with the operation. 
13410deb0bf7SJacob Keller */ 13420deb0bf7SJacob Keller if (!rsd) 13430deb0bf7SJacob Keller dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); 13440deb0bf7SJacob Keller 13450deb0bf7SJacob Keller /* disable promiscuous modes in case they were enabled 13460deb0bf7SJacob Keller * ignore any error if disabling process failed 13470deb0bf7SJacob Keller */ 13480deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || 13490deb0bf7SJacob Keller test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { 13500deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan) 13510deb0bf7SJacob Keller promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; 13520deb0bf7SJacob Keller else 13530deb0bf7SJacob Keller promisc_m = ICE_UCAST_PROMISC_BITS; 13540deb0bf7SJacob Keller 13550deb0bf7SJacob Keller if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m)) 13560deb0bf7SJacob Keller dev_err(dev, "disabling promiscuous mode failed\n"); 13570deb0bf7SJacob Keller } 13580deb0bf7SJacob Keller 13590deb0bf7SJacob Keller ice_eswitch_del_vf_mac_rule(vf); 13600deb0bf7SJacob Keller 13610deb0bf7SJacob Keller ice_vf_fdir_exit(vf); 13620deb0bf7SJacob Keller ice_vf_fdir_init(vf); 13630deb0bf7SJacob Keller /* clean VF control VSI when resetting VF since it should be setup 13640deb0bf7SJacob Keller * only when VF creates its first FDIR rule. 
13650deb0bf7SJacob Keller */ 13660deb0bf7SJacob Keller if (vf->ctrl_vsi_idx != ICE_NO_VSI) 13670deb0bf7SJacob Keller ice_vf_ctrl_vsi_release(vf); 13680deb0bf7SJacob Keller 13690deb0bf7SJacob Keller ice_vf_pre_vsi_rebuild(vf); 13700deb0bf7SJacob Keller 13710deb0bf7SJacob Keller if (ice_vf_rebuild_vsi_with_release(vf)) { 13720deb0bf7SJacob Keller dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id); 13730deb0bf7SJacob Keller return false; 13740deb0bf7SJacob Keller } 13750deb0bf7SJacob Keller 13760deb0bf7SJacob Keller ice_vf_post_vsi_rebuild(vf); 13770deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 13780deb0bf7SJacob Keller ice_eswitch_update_repr(vsi); 13790deb0bf7SJacob Keller ice_eswitch_replay_vf_mac_rule(vf); 13800deb0bf7SJacob Keller 13810deb0bf7SJacob Keller /* if the VF has been reset allow it to come up again */ 13820deb0bf7SJacob Keller if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, 1383dc36796eSJacob Keller ICE_MAX_SRIOV_VFS, vf->vf_id)) 13840deb0bf7SJacob Keller dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); 13850deb0bf7SJacob Keller 13860deb0bf7SJacob Keller return true; 13870deb0bf7SJacob Keller } 13880deb0bf7SJacob Keller 13890deb0bf7SJacob Keller /** 13900deb0bf7SJacob Keller * ice_vc_notify_link_state - Inform all VFs on a PF of link status 13910deb0bf7SJacob Keller * @pf: pointer to the PF structure 13920deb0bf7SJacob Keller */ 13930deb0bf7SJacob Keller void ice_vc_notify_link_state(struct ice_pf *pf) 13940deb0bf7SJacob Keller { 13950deb0bf7SJacob Keller struct ice_vf *vf; 13960deb0bf7SJacob Keller unsigned int bkt; 13970deb0bf7SJacob Keller 13980deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 13990deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) 14000deb0bf7SJacob Keller ice_vc_notify_vf_link_state(vf); 14010deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 14020deb0bf7SJacob Keller } 14030deb0bf7SJacob Keller 14040deb0bf7SJacob Keller /** 14050deb0bf7SJacob Keller * ice_vc_notify_reset 
- Send pending reset message to all VFs 14060deb0bf7SJacob Keller * @pf: pointer to the PF structure 14070deb0bf7SJacob Keller * 14080deb0bf7SJacob Keller * indicate a pending reset to all VFs on a given PF 14090deb0bf7SJacob Keller */ 14100deb0bf7SJacob Keller void ice_vc_notify_reset(struct ice_pf *pf) 14110deb0bf7SJacob Keller { 14120deb0bf7SJacob Keller struct virtchnl_pf_event pfe; 14130deb0bf7SJacob Keller 14140deb0bf7SJacob Keller if (!ice_has_vfs(pf)) 14150deb0bf7SJacob Keller return; 14160deb0bf7SJacob Keller 14170deb0bf7SJacob Keller pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 14180deb0bf7SJacob Keller pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 14190deb0bf7SJacob Keller ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, 14200deb0bf7SJacob Keller (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); 14210deb0bf7SJacob Keller } 14220deb0bf7SJacob Keller 14230deb0bf7SJacob Keller /** 14240deb0bf7SJacob Keller * ice_vc_notify_vf_reset - Notify VF of a reset event 14250deb0bf7SJacob Keller * @vf: pointer to the VF structure 14260deb0bf7SJacob Keller */ 14270deb0bf7SJacob Keller static void ice_vc_notify_vf_reset(struct ice_vf *vf) 14280deb0bf7SJacob Keller { 14290deb0bf7SJacob Keller struct virtchnl_pf_event pfe; 14300deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 14310deb0bf7SJacob Keller 14320deb0bf7SJacob Keller /* Bail out if VF is in disabled state, neither initialized, nor active 14330deb0bf7SJacob Keller * state - otherwise proceed with notifications 14340deb0bf7SJacob Keller */ 14350deb0bf7SJacob Keller if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && 14360deb0bf7SJacob Keller !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || 14370deb0bf7SJacob Keller test_bit(ICE_VF_STATE_DIS, vf->vf_states)) 14380deb0bf7SJacob Keller return; 14390deb0bf7SJacob Keller 14400deb0bf7SJacob Keller pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 14410deb0bf7SJacob Keller pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 14420deb0bf7SJacob Keller 
ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 14430deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), 14440deb0bf7SJacob Keller NULL); 14450deb0bf7SJacob Keller } 14460deb0bf7SJacob Keller 14470deb0bf7SJacob Keller /** 14480deb0bf7SJacob Keller * ice_init_vf_vsi_res - initialize/setup VF VSI resources 14490deb0bf7SJacob Keller * @vf: VF to initialize/setup the VSI for 14500deb0bf7SJacob Keller * 14510deb0bf7SJacob Keller * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the 14520deb0bf7SJacob Keller * VF VSI's broadcast filter and is only used during initial VF creation. 14530deb0bf7SJacob Keller */ 14540deb0bf7SJacob Keller static int ice_init_vf_vsi_res(struct ice_vf *vf) 14550deb0bf7SJacob Keller { 14560deb0bf7SJacob Keller struct ice_vsi_vlan_ops *vlan_ops; 14570deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 14580deb0bf7SJacob Keller u8 broadcast[ETH_ALEN]; 14590deb0bf7SJacob Keller struct ice_vsi *vsi; 14600deb0bf7SJacob Keller struct device *dev; 14610deb0bf7SJacob Keller int err; 14620deb0bf7SJacob Keller 14630deb0bf7SJacob Keller vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); 14640deb0bf7SJacob Keller 14650deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 14660deb0bf7SJacob Keller vsi = ice_vf_vsi_setup(vf); 14670deb0bf7SJacob Keller if (!vsi) 14680deb0bf7SJacob Keller return -ENOMEM; 14690deb0bf7SJacob Keller 14700deb0bf7SJacob Keller err = ice_vsi_add_vlan_zero(vsi); 14710deb0bf7SJacob Keller if (err) { 14720deb0bf7SJacob Keller dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", 14730deb0bf7SJacob Keller vf->vf_id); 14740deb0bf7SJacob Keller goto release_vsi; 14750deb0bf7SJacob Keller } 14760deb0bf7SJacob Keller 14770deb0bf7SJacob Keller vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 14780deb0bf7SJacob Keller err = vlan_ops->ena_rx_filtering(vsi); 14790deb0bf7SJacob Keller if (err) { 14800deb0bf7SJacob Keller dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", 
14810deb0bf7SJacob Keller vf->vf_id); 14820deb0bf7SJacob Keller goto release_vsi; 14830deb0bf7SJacob Keller } 14840deb0bf7SJacob Keller 14850deb0bf7SJacob Keller eth_broadcast_addr(broadcast); 14860deb0bf7SJacob Keller err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 14870deb0bf7SJacob Keller if (err) { 14880deb0bf7SJacob Keller dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", 14890deb0bf7SJacob Keller vf->vf_id, err); 14900deb0bf7SJacob Keller goto release_vsi; 14910deb0bf7SJacob Keller } 14920deb0bf7SJacob Keller 1493a8ea6d86SJacob Keller err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); 14940deb0bf7SJacob Keller if (err) { 14950deb0bf7SJacob Keller dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", 14960deb0bf7SJacob Keller vf->vf_id); 14970deb0bf7SJacob Keller goto release_vsi; 14980deb0bf7SJacob Keller } 14990deb0bf7SJacob Keller 15000deb0bf7SJacob Keller vf->num_mac = 1; 15010deb0bf7SJacob Keller 15020deb0bf7SJacob Keller return 0; 15030deb0bf7SJacob Keller 15040deb0bf7SJacob Keller release_vsi: 15050deb0bf7SJacob Keller ice_vf_vsi_release(vf); 15060deb0bf7SJacob Keller return err; 15070deb0bf7SJacob Keller } 15080deb0bf7SJacob Keller 15090deb0bf7SJacob Keller /** 15100deb0bf7SJacob Keller * ice_start_vfs - start VFs so they are ready to be used by SR-IOV 15110deb0bf7SJacob Keller * @pf: PF the VFs are associated with 15120deb0bf7SJacob Keller */ 15130deb0bf7SJacob Keller static int ice_start_vfs(struct ice_pf *pf) 15140deb0bf7SJacob Keller { 15150deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 15160deb0bf7SJacob Keller unsigned int bkt, it_cnt; 15170deb0bf7SJacob Keller struct ice_vf *vf; 15180deb0bf7SJacob Keller int retval; 15190deb0bf7SJacob Keller 15200deb0bf7SJacob Keller lockdep_assert_held(&pf->vfs.table_lock); 15210deb0bf7SJacob Keller 15220deb0bf7SJacob Keller it_cnt = 0; 15230deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 15240deb0bf7SJacob Keller ice_clear_vf_reset_trigger(vf); 
15250deb0bf7SJacob Keller 15260deb0bf7SJacob Keller retval = ice_init_vf_vsi_res(vf); 15270deb0bf7SJacob Keller if (retval) { 15280deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", 15290deb0bf7SJacob Keller vf->vf_id, retval); 15300deb0bf7SJacob Keller goto teardown; 15310deb0bf7SJacob Keller } 15320deb0bf7SJacob Keller 15330deb0bf7SJacob Keller set_bit(ICE_VF_STATE_INIT, vf->vf_states); 15340deb0bf7SJacob Keller ice_ena_vf_mappings(vf); 15350deb0bf7SJacob Keller wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 15360deb0bf7SJacob Keller it_cnt++; 15370deb0bf7SJacob Keller } 15380deb0bf7SJacob Keller 15390deb0bf7SJacob Keller ice_flush(hw); 15400deb0bf7SJacob Keller return 0; 15410deb0bf7SJacob Keller 15420deb0bf7SJacob Keller teardown: 15430deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 15440deb0bf7SJacob Keller if (it_cnt == 0) 15450deb0bf7SJacob Keller break; 15460deb0bf7SJacob Keller 15470deb0bf7SJacob Keller ice_dis_vf_mappings(vf); 15480deb0bf7SJacob Keller ice_vf_vsi_release(vf); 15490deb0bf7SJacob Keller it_cnt--; 15500deb0bf7SJacob Keller } 15510deb0bf7SJacob Keller 15520deb0bf7SJacob Keller return retval; 15530deb0bf7SJacob Keller } 15540deb0bf7SJacob Keller 15550deb0bf7SJacob Keller /** 15560deb0bf7SJacob Keller * ice_create_vf_entries - Allocate and insert VF entries 15570deb0bf7SJacob Keller * @pf: pointer to the PF structure 15580deb0bf7SJacob Keller * @num_vfs: the number of VFs to allocate 15590deb0bf7SJacob Keller * 15600deb0bf7SJacob Keller * Allocate new VF entries and insert them into the hash table. Set some 15610deb0bf7SJacob Keller * basic default fields for initializing the new VFs. 15620deb0bf7SJacob Keller * 15630deb0bf7SJacob Keller * After this function exits, the hash table will have num_vfs entries 15640deb0bf7SJacob Keller * inserted. 15650deb0bf7SJacob Keller * 15660deb0bf7SJacob Keller * Returns 0 on success or an integer error code on failure. 
15670deb0bf7SJacob Keller */ 15680deb0bf7SJacob Keller static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) 15690deb0bf7SJacob Keller { 15700deb0bf7SJacob Keller struct ice_vfs *vfs = &pf->vfs; 15710deb0bf7SJacob Keller struct ice_vf *vf; 15720deb0bf7SJacob Keller u16 vf_id; 15730deb0bf7SJacob Keller int err; 15740deb0bf7SJacob Keller 15750deb0bf7SJacob Keller lockdep_assert_held(&vfs->table_lock); 15760deb0bf7SJacob Keller 15770deb0bf7SJacob Keller for (vf_id = 0; vf_id < num_vfs; vf_id++) { 15780deb0bf7SJacob Keller vf = kzalloc(sizeof(*vf), GFP_KERNEL); 15790deb0bf7SJacob Keller if (!vf) { 15800deb0bf7SJacob Keller err = -ENOMEM; 15810deb0bf7SJacob Keller goto err_free_entries; 15820deb0bf7SJacob Keller } 15830deb0bf7SJacob Keller kref_init(&vf->refcnt); 15840deb0bf7SJacob Keller 15850deb0bf7SJacob Keller vf->pf = pf; 15860deb0bf7SJacob Keller vf->vf_id = vf_id; 15870deb0bf7SJacob Keller 15880deb0bf7SJacob Keller vf->vf_sw_id = pf->first_sw; 15890deb0bf7SJacob Keller /* assign default capabilities */ 15900deb0bf7SJacob Keller vf->spoofchk = true; 15910deb0bf7SJacob Keller vf->num_vf_qs = pf->vfs.num_qps_per; 15920deb0bf7SJacob Keller ice_vc_set_default_allowlist(vf); 15930deb0bf7SJacob Keller 15940deb0bf7SJacob Keller /* ctrl_vsi_idx will be set to a valid value only when VF 15950deb0bf7SJacob Keller * creates its first fdir rule. 
15960deb0bf7SJacob Keller */ 15970deb0bf7SJacob Keller ice_vf_ctrl_invalidate_vsi(vf); 15980deb0bf7SJacob Keller ice_vf_fdir_init(vf); 15990deb0bf7SJacob Keller 1600a7e11710SJacob Keller ice_virtchnl_set_dflt_ops(vf); 16010deb0bf7SJacob Keller 16020deb0bf7SJacob Keller mutex_init(&vf->cfg_lock); 16030deb0bf7SJacob Keller 16040deb0bf7SJacob Keller hash_add_rcu(vfs->table, &vf->entry, vf_id); 16050deb0bf7SJacob Keller } 16060deb0bf7SJacob Keller 16070deb0bf7SJacob Keller return 0; 16080deb0bf7SJacob Keller 16090deb0bf7SJacob Keller err_free_entries: 16100deb0bf7SJacob Keller ice_free_vf_entries(pf); 16110deb0bf7SJacob Keller return err; 16120deb0bf7SJacob Keller } 16130deb0bf7SJacob Keller 16140deb0bf7SJacob Keller /** 16150deb0bf7SJacob Keller * ice_ena_vfs - enable VFs so they are ready to be used 16160deb0bf7SJacob Keller * @pf: pointer to the PF structure 16170deb0bf7SJacob Keller * @num_vfs: number of VFs to enable 16180deb0bf7SJacob Keller */ 16190deb0bf7SJacob Keller static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) 16200deb0bf7SJacob Keller { 16210deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 16220deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 16230deb0bf7SJacob Keller int ret; 16240deb0bf7SJacob Keller 16250deb0bf7SJacob Keller /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ 16260deb0bf7SJacob Keller wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), 16270deb0bf7SJacob Keller ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); 16280deb0bf7SJacob Keller set_bit(ICE_OICR_INTR_DIS, pf->state); 16290deb0bf7SJacob Keller ice_flush(hw); 16300deb0bf7SJacob Keller 16310deb0bf7SJacob Keller ret = pci_enable_sriov(pf->pdev, num_vfs); 16320deb0bf7SJacob Keller if (ret) 16330deb0bf7SJacob Keller goto err_unroll_intr; 16340deb0bf7SJacob Keller 16350deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 16360deb0bf7SJacob Keller 163794ab2488SJacob Keller ret = ice_set_per_vf_res(pf, num_vfs); 163894ab2488SJacob Keller if (ret) { 163994ab2488SJacob Keller dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n", 164094ab2488SJacob Keller num_vfs, ret); 16410deb0bf7SJacob Keller goto err_unroll_sriov; 16420deb0bf7SJacob Keller } 16430deb0bf7SJacob Keller 16440deb0bf7SJacob Keller ret = ice_create_vf_entries(pf, num_vfs); 16450deb0bf7SJacob Keller if (ret) { 16460deb0bf7SJacob Keller dev_err(dev, "Failed to allocate VF entries for %d VFs\n", 16470deb0bf7SJacob Keller num_vfs); 16480deb0bf7SJacob Keller goto err_unroll_sriov; 16490deb0bf7SJacob Keller } 16500deb0bf7SJacob Keller 165194ab2488SJacob Keller ret = ice_start_vfs(pf); 165294ab2488SJacob Keller if (ret) { 165394ab2488SJacob Keller dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); 16540deb0bf7SJacob Keller ret = -EAGAIN; 16550deb0bf7SJacob Keller goto err_unroll_vf_entries; 16560deb0bf7SJacob Keller } 16570deb0bf7SJacob Keller 16580deb0bf7SJacob Keller clear_bit(ICE_VF_DIS, pf->state); 16590deb0bf7SJacob Keller 16600deb0bf7SJacob Keller ret = ice_eswitch_configure(pf); 16612b369448SJacob Keller if (ret) { 16622b369448SJacob Keller dev_err(dev, "Failed to configure eswitch, err %d\n", ret); 16630deb0bf7SJacob Keller goto err_unroll_sriov; 16642b369448SJacob Keller } 16650deb0bf7SJacob Keller 16660deb0bf7SJacob Keller /* rearm global interrupts */ 16670deb0bf7SJacob Keller 
if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) 16680deb0bf7SJacob Keller ice_irq_dynamic_ena(hw, NULL, NULL); 16690deb0bf7SJacob Keller 16700deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 16710deb0bf7SJacob Keller 16720deb0bf7SJacob Keller return 0; 16730deb0bf7SJacob Keller 16740deb0bf7SJacob Keller err_unroll_vf_entries: 16750deb0bf7SJacob Keller ice_free_vf_entries(pf); 16760deb0bf7SJacob Keller err_unroll_sriov: 16770deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 16780deb0bf7SJacob Keller pci_disable_sriov(pf->pdev); 16790deb0bf7SJacob Keller err_unroll_intr: 16800deb0bf7SJacob Keller /* rearm interrupts here */ 16810deb0bf7SJacob Keller ice_irq_dynamic_ena(hw, NULL, NULL); 16820deb0bf7SJacob Keller clear_bit(ICE_OICR_INTR_DIS, pf->state); 16830deb0bf7SJacob Keller return ret; 16840deb0bf7SJacob Keller } 16850deb0bf7SJacob Keller 16860deb0bf7SJacob Keller /** 16870deb0bf7SJacob Keller * ice_pci_sriov_ena - Enable or change number of VFs 16880deb0bf7SJacob Keller * @pf: pointer to the PF structure 16890deb0bf7SJacob Keller * @num_vfs: number of VFs to allocate 16900deb0bf7SJacob Keller * 16910deb0bf7SJacob Keller * Returns 0 on success and negative on failure 16920deb0bf7SJacob Keller */ 16930deb0bf7SJacob Keller static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) 16940deb0bf7SJacob Keller { 16950deb0bf7SJacob Keller int pre_existing_vfs = pci_num_vf(pf->pdev); 16960deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 16970deb0bf7SJacob Keller int err; 16980deb0bf7SJacob Keller 16990deb0bf7SJacob Keller if (pre_existing_vfs && pre_existing_vfs != num_vfs) 17000deb0bf7SJacob Keller ice_free_vfs(pf); 17010deb0bf7SJacob Keller else if (pre_existing_vfs && pre_existing_vfs == num_vfs) 17020deb0bf7SJacob Keller return 0; 17030deb0bf7SJacob Keller 17040deb0bf7SJacob Keller if (num_vfs > pf->vfs.num_supported) { 17050deb0bf7SJacob Keller dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", 17060deb0bf7SJacob Keller 
num_vfs, pf->vfs.num_supported); 17070deb0bf7SJacob Keller return -EOPNOTSUPP; 17080deb0bf7SJacob Keller } 17090deb0bf7SJacob Keller 17100deb0bf7SJacob Keller dev_info(dev, "Enabling %d VFs\n", num_vfs); 17110deb0bf7SJacob Keller err = ice_ena_vfs(pf, num_vfs); 17120deb0bf7SJacob Keller if (err) { 17130deb0bf7SJacob Keller dev_err(dev, "Failed to enable SR-IOV: %d\n", err); 17140deb0bf7SJacob Keller return err; 17150deb0bf7SJacob Keller } 17160deb0bf7SJacob Keller 17170deb0bf7SJacob Keller set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 17180deb0bf7SJacob Keller return 0; 17190deb0bf7SJacob Keller } 17200deb0bf7SJacob Keller 17210deb0bf7SJacob Keller /** 17220deb0bf7SJacob Keller * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks 17230deb0bf7SJacob Keller * @pf: PF to enabled SR-IOV on 17240deb0bf7SJacob Keller */ 17250deb0bf7SJacob Keller static int ice_check_sriov_allowed(struct ice_pf *pf) 17260deb0bf7SJacob Keller { 17270deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 17280deb0bf7SJacob Keller 17290deb0bf7SJacob Keller if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { 17300deb0bf7SJacob Keller dev_err(dev, "This device is not capable of SR-IOV\n"); 17310deb0bf7SJacob Keller return -EOPNOTSUPP; 17320deb0bf7SJacob Keller } 17330deb0bf7SJacob Keller 17340deb0bf7SJacob Keller if (ice_is_safe_mode(pf)) { 17350deb0bf7SJacob Keller dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); 17360deb0bf7SJacob Keller return -EOPNOTSUPP; 17370deb0bf7SJacob Keller } 17380deb0bf7SJacob Keller 17390deb0bf7SJacob Keller if (!ice_pf_state_is_nominal(pf)) { 17400deb0bf7SJacob Keller dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); 17410deb0bf7SJacob Keller return -EBUSY; 17420deb0bf7SJacob Keller } 17430deb0bf7SJacob Keller 17440deb0bf7SJacob Keller return 0; 17450deb0bf7SJacob Keller } 17460deb0bf7SJacob Keller 17470deb0bf7SJacob Keller /** 17480deb0bf7SJacob Keller * ice_sriov_configure - Enable or change number of VFs 
via sysfs 17490deb0bf7SJacob Keller * @pdev: pointer to a pci_dev structure 17500deb0bf7SJacob Keller * @num_vfs: number of VFs to allocate or 0 to free VFs 17510deb0bf7SJacob Keller * 17520deb0bf7SJacob Keller * This function is called when the user updates the number of VFs in sysfs. On 17530deb0bf7SJacob Keller * success return whatever num_vfs was set to by the caller. Return negative on 17540deb0bf7SJacob Keller * failure. 17550deb0bf7SJacob Keller */ 17560deb0bf7SJacob Keller int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) 17570deb0bf7SJacob Keller { 17580deb0bf7SJacob Keller struct ice_pf *pf = pci_get_drvdata(pdev); 17590deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 17600deb0bf7SJacob Keller int err; 17610deb0bf7SJacob Keller 17620deb0bf7SJacob Keller err = ice_check_sriov_allowed(pf); 17630deb0bf7SJacob Keller if (err) 17640deb0bf7SJacob Keller return err; 17650deb0bf7SJacob Keller 17660deb0bf7SJacob Keller if (!num_vfs) { 17670deb0bf7SJacob Keller if (!pci_vfs_assigned(pdev)) { 17680deb0bf7SJacob Keller ice_mbx_deinit_snapshot(&pf->hw); 17690deb0bf7SJacob Keller ice_free_vfs(pf); 17700deb0bf7SJacob Keller if (pf->lag) 17710deb0bf7SJacob Keller ice_enable_lag(pf->lag); 17720deb0bf7SJacob Keller return 0; 17730deb0bf7SJacob Keller } 17740deb0bf7SJacob Keller 17750deb0bf7SJacob Keller dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); 17760deb0bf7SJacob Keller return -EBUSY; 17770deb0bf7SJacob Keller } 17780deb0bf7SJacob Keller 17790deb0bf7SJacob Keller err = ice_mbx_init_snapshot(&pf->hw, num_vfs); 17800deb0bf7SJacob Keller if (err) 17810deb0bf7SJacob Keller return err; 17820deb0bf7SJacob Keller 17830deb0bf7SJacob Keller err = ice_pci_sriov_ena(pf, num_vfs); 17840deb0bf7SJacob Keller if (err) { 17850deb0bf7SJacob Keller ice_mbx_deinit_snapshot(&pf->hw); 17860deb0bf7SJacob Keller return err; 17870deb0bf7SJacob Keller } 17880deb0bf7SJacob Keller 17890deb0bf7SJacob Keller if (pf->lag) 17900deb0bf7SJacob Keller 
ice_disable_lag(pf->lag); 17910deb0bf7SJacob Keller return num_vfs; 17920deb0bf7SJacob Keller } 17930deb0bf7SJacob Keller 17940deb0bf7SJacob Keller /** 17950deb0bf7SJacob Keller * ice_process_vflr_event - Free VF resources via IRQ calls 17960deb0bf7SJacob Keller * @pf: pointer to the PF structure 17970deb0bf7SJacob Keller * 17980deb0bf7SJacob Keller * called from the VFLR IRQ handler to 17990deb0bf7SJacob Keller * free up VF resources and state variables 18000deb0bf7SJacob Keller */ 18010deb0bf7SJacob Keller void ice_process_vflr_event(struct ice_pf *pf) 18020deb0bf7SJacob Keller { 18030deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 18040deb0bf7SJacob Keller struct ice_vf *vf; 18050deb0bf7SJacob Keller unsigned int bkt; 18060deb0bf7SJacob Keller u32 reg; 18070deb0bf7SJacob Keller 18080deb0bf7SJacob Keller if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 18090deb0bf7SJacob Keller !ice_has_vfs(pf)) 18100deb0bf7SJacob Keller return; 18110deb0bf7SJacob Keller 18120deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 18130deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 18140deb0bf7SJacob Keller u32 reg_idx, bit_idx; 18150deb0bf7SJacob Keller 18160deb0bf7SJacob Keller reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 18170deb0bf7SJacob Keller bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 18180deb0bf7SJacob Keller /* read GLGEN_VFLRSTAT register to find out the flr VFs */ 18190deb0bf7SJacob Keller reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); 18200deb0bf7SJacob Keller if (reg & BIT(bit_idx)) { 18210deb0bf7SJacob Keller /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ 18220deb0bf7SJacob Keller mutex_lock(&vf->cfg_lock); 18230deb0bf7SJacob Keller ice_reset_vf(vf, true); 18240deb0bf7SJacob Keller mutex_unlock(&vf->cfg_lock); 18250deb0bf7SJacob Keller } 18260deb0bf7SJacob Keller } 18270deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 18280deb0bf7SJacob Keller } 18290deb0bf7SJacob Keller 18300deb0bf7SJacob Keller /** 
18310deb0bf7SJacob Keller * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF 18320deb0bf7SJacob Keller * @vf: pointer to the VF info 18330deb0bf7SJacob Keller */ 18340deb0bf7SJacob Keller static void ice_vc_reset_vf(struct ice_vf *vf) 18350deb0bf7SJacob Keller { 18360deb0bf7SJacob Keller ice_vc_notify_vf_reset(vf); 18370deb0bf7SJacob Keller ice_reset_vf(vf, false); 18380deb0bf7SJacob Keller } 18390deb0bf7SJacob Keller 18400deb0bf7SJacob Keller /** 18410deb0bf7SJacob Keller * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in 18420deb0bf7SJacob Keller * @pf: PF used to index all VFs 18430deb0bf7SJacob Keller * @pfq: queue index relative to the PF's function space 18440deb0bf7SJacob Keller * 18450deb0bf7SJacob Keller * If no VF is found who owns the pfq then return NULL, otherwise return a 18460deb0bf7SJacob Keller * pointer to the VF who owns the pfq 18470deb0bf7SJacob Keller * 18480deb0bf7SJacob Keller * If this function returns non-NULL, it acquires a reference count of the VF 18490deb0bf7SJacob Keller * structure. The caller is responsible for calling ice_put_vf() to drop this 18500deb0bf7SJacob Keller * reference. 
18510deb0bf7SJacob Keller */ 18520deb0bf7SJacob Keller static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) 18530deb0bf7SJacob Keller { 18540deb0bf7SJacob Keller struct ice_vf *vf; 18550deb0bf7SJacob Keller unsigned int bkt; 18560deb0bf7SJacob Keller 18570deb0bf7SJacob Keller rcu_read_lock(); 18580deb0bf7SJacob Keller ice_for_each_vf_rcu(pf, bkt, vf) { 18590deb0bf7SJacob Keller struct ice_vsi *vsi; 18600deb0bf7SJacob Keller u16 rxq_idx; 18610deb0bf7SJacob Keller 18620deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 18630deb0bf7SJacob Keller 18640deb0bf7SJacob Keller ice_for_each_rxq(vsi, rxq_idx) 18650deb0bf7SJacob Keller if (vsi->rxq_map[rxq_idx] == pfq) { 18660deb0bf7SJacob Keller struct ice_vf *found; 18670deb0bf7SJacob Keller 18680deb0bf7SJacob Keller if (kref_get_unless_zero(&vf->refcnt)) 18690deb0bf7SJacob Keller found = vf; 18700deb0bf7SJacob Keller else 18710deb0bf7SJacob Keller found = NULL; 18720deb0bf7SJacob Keller rcu_read_unlock(); 18730deb0bf7SJacob Keller return found; 18740deb0bf7SJacob Keller } 18750deb0bf7SJacob Keller } 18760deb0bf7SJacob Keller rcu_read_unlock(); 18770deb0bf7SJacob Keller 18780deb0bf7SJacob Keller return NULL; 18790deb0bf7SJacob Keller } 18800deb0bf7SJacob Keller 18810deb0bf7SJacob Keller /** 18820deb0bf7SJacob Keller * ice_globalq_to_pfq - convert from global queue index to PF space queue index 18830deb0bf7SJacob Keller * @pf: PF used for conversion 18840deb0bf7SJacob Keller * @globalq: global queue index used to convert to PF space queue index 18850deb0bf7SJacob Keller */ 18860deb0bf7SJacob Keller static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) 18870deb0bf7SJacob Keller { 18880deb0bf7SJacob Keller return globalq - pf->hw.func_caps.common_cap.rxq_first_id; 18890deb0bf7SJacob Keller } 18900deb0bf7SJacob Keller 18910deb0bf7SJacob Keller /** 18920deb0bf7SJacob Keller * ice_vf_lan_overflow_event - handle LAN overflow event for a VF 18930deb0bf7SJacob Keller * @pf: PF that the LAN overflow event happened 
on 18940deb0bf7SJacob Keller * @event: structure holding the event information for the LAN overflow event 18950deb0bf7SJacob Keller * 18960deb0bf7SJacob Keller * Determine if the LAN overflow event was caused by a VF queue. If it was not 18970deb0bf7SJacob Keller * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a 18980deb0bf7SJacob Keller * reset on the offending VF. 18990deb0bf7SJacob Keller */ 19000deb0bf7SJacob Keller void 19010deb0bf7SJacob Keller ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) 19020deb0bf7SJacob Keller { 19030deb0bf7SJacob Keller u32 gldcb_rtctq, queue; 19040deb0bf7SJacob Keller struct ice_vf *vf; 19050deb0bf7SJacob Keller 19060deb0bf7SJacob Keller gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq); 19070deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); 19080deb0bf7SJacob Keller 19090deb0bf7SJacob Keller /* event returns device global Rx queue number */ 19100deb0bf7SJacob Keller queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> 19110deb0bf7SJacob Keller GLDCB_RTCTQ_RXQNUM_S; 19120deb0bf7SJacob Keller 19130deb0bf7SJacob Keller vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); 19140deb0bf7SJacob Keller if (!vf) 19150deb0bf7SJacob Keller return; 19160deb0bf7SJacob Keller 19170deb0bf7SJacob Keller mutex_lock(&vf->cfg_lock); 19180deb0bf7SJacob Keller ice_vc_reset_vf(vf); 19190deb0bf7SJacob Keller mutex_unlock(&vf->cfg_lock); 19200deb0bf7SJacob Keller 19210deb0bf7SJacob Keller ice_put_vf(vf); 19220deb0bf7SJacob Keller } 19230deb0bf7SJacob Keller 19240deb0bf7SJacob Keller /** 19250deb0bf7SJacob Keller * ice_vc_send_msg_to_vf - Send message to VF 19260deb0bf7SJacob Keller * @vf: pointer to the VF info 19270deb0bf7SJacob Keller * @v_opcode: virtual channel opcode 19280deb0bf7SJacob Keller * @v_retval: virtual channel return value 19290deb0bf7SJacob Keller * @msg: pointer to the msg buffer 19300deb0bf7SJacob Keller * @msglen: msg 
length 19310deb0bf7SJacob Keller * 19320deb0bf7SJacob Keller * send msg to VF 19330deb0bf7SJacob Keller */ 19340deb0bf7SJacob Keller int 19350deb0bf7SJacob Keller ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, 19360deb0bf7SJacob Keller enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) 19370deb0bf7SJacob Keller { 19380deb0bf7SJacob Keller struct device *dev; 19390deb0bf7SJacob Keller struct ice_pf *pf; 19400deb0bf7SJacob Keller int aq_ret; 19410deb0bf7SJacob Keller 19420deb0bf7SJacob Keller pf = vf->pf; 19430deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 19440deb0bf7SJacob Keller 19450deb0bf7SJacob Keller aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, 19460deb0bf7SJacob Keller msg, msglen, NULL); 19470deb0bf7SJacob Keller if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { 19480deb0bf7SJacob Keller dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n", 19490deb0bf7SJacob Keller vf->vf_id, aq_ret, 19500deb0bf7SJacob Keller ice_aq_str(pf->hw.mailboxq.sq_last_status)); 19510deb0bf7SJacob Keller return -EIO; 19520deb0bf7SJacob Keller } 19530deb0bf7SJacob Keller 19540deb0bf7SJacob Keller return 0; 19550deb0bf7SJacob Keller } 19560deb0bf7SJacob Keller 19570deb0bf7SJacob Keller /** 19580deb0bf7SJacob Keller * ice_vc_get_ver_msg 19590deb0bf7SJacob Keller * @vf: pointer to the VF info 19600deb0bf7SJacob Keller * @msg: pointer to the msg buffer 19610deb0bf7SJacob Keller * 19620deb0bf7SJacob Keller * called from the VF to request the API version used by the PF 19630deb0bf7SJacob Keller */ 19640deb0bf7SJacob Keller static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) 19650deb0bf7SJacob Keller { 19660deb0bf7SJacob Keller struct virtchnl_version_info info = { 19670deb0bf7SJacob Keller VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR 19680deb0bf7SJacob Keller }; 19690deb0bf7SJacob Keller 19700deb0bf7SJacob Keller vf->vf_ver = *(struct virtchnl_version_info *)msg; 19710deb0bf7SJacob Keller /* VFs running the 
1.0 API expect to get 1.0 back or they will cry. */ 19720deb0bf7SJacob Keller if (VF_IS_V10(&vf->vf_ver)) 19730deb0bf7SJacob Keller info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 19740deb0bf7SJacob Keller 19750deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 19760deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, (u8 *)&info, 19770deb0bf7SJacob Keller sizeof(struct virtchnl_version_info)); 19780deb0bf7SJacob Keller } 19790deb0bf7SJacob Keller 19800deb0bf7SJacob Keller /** 19810deb0bf7SJacob Keller * ice_vc_get_max_frame_size - get max frame size allowed for VF 19820deb0bf7SJacob Keller * @vf: VF used to determine max frame size 19830deb0bf7SJacob Keller * 19840deb0bf7SJacob Keller * Max frame size is determined based on the current port's max frame size and 19850deb0bf7SJacob Keller * whether a port VLAN is configured on this VF. The VF is not aware whether 19860deb0bf7SJacob Keller * it's in a port VLAN so the PF needs to account for this in max frame size 19870deb0bf7SJacob Keller * checks and sending the max frame size to the VF. 
 */
static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	u16 max_frame_size;

	max_frame_size = pi->phy.link_info.max_frame_size;

	/* reserve room for the VLAN tag the PF inserts on the VF's behalf */
	if (ice_vf_is_port_vlan_ena(vf))
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(pf, vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		/* respond with a zero-length buffer on allocation failure */
		len = 0;
		goto err;
	}
	/* v1.1+ VFs report their capabilities in the message payload;
	 * older VFs get a fixed legacy capability set
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		/* VLAN offloads based on current device configuration */
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
	} else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
		/* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
		 * these two conditions, which amounts to guest VLAN filtering
		 * and offloads being based on the inner VLAN or the
		 * inner/single VLAN respectively and don't allow VF to
		 * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
		 */
		if (ice_is_dvm_ena(&pf->hw) && ice_vf_is_port_vlan_ena(vf)) {
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
		} else if (!ice_is_dvm_ena(&pf->hw) &&
			   !ice_vf_is_port_vlan_ena(vf)) {
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
			/* configure backward compatible support for VFs that
			 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
			 * configured in SVM, and no port VLAN is configured
			 */
			ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
		} else if (ice_is_dvm_ena(&pf->hw)) {
			/* configure software offloaded VLAN support when DVM
			 * is enabled, but no port VLAN is enabled
			 */
			ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
		}
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->vfs.num_msix_per;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = ice_vc_get_max_frame_size(vf);

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->hw_lan_addr.addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	ice_vc_set_caps_allowlist(vf);
	ice_vc_set_working_allowlist(vf);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF (also on the error paths above) */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * ice_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		ice_reset_vf(vf, false);
}

/**
 * ice_find_vsi_from_id
 * @pf: the PF structure to search for the VSI
 * @id: ID of the VSI it is searching for
 *
 * searches for the VSI with the given ID
 */
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
			return pf->vsi[i];

	return NULL;
21730deb0bf7SJacob Keller } 21740deb0bf7SJacob Keller 21750deb0bf7SJacob Keller /** 21760deb0bf7SJacob Keller * ice_vc_isvalid_vsi_id 21770deb0bf7SJacob Keller * @vf: pointer to the VF info 21780deb0bf7SJacob Keller * @vsi_id: VF relative VSI ID 21790deb0bf7SJacob Keller * 21800deb0bf7SJacob Keller * check for the valid VSI ID 21810deb0bf7SJacob Keller */ 21820deb0bf7SJacob Keller bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) 21830deb0bf7SJacob Keller { 21840deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 21850deb0bf7SJacob Keller struct ice_vsi *vsi; 21860deb0bf7SJacob Keller 21870deb0bf7SJacob Keller vsi = ice_find_vsi_from_id(pf, vsi_id); 21880deb0bf7SJacob Keller 21890deb0bf7SJacob Keller return (vsi && (vsi->vf == vf)); 21900deb0bf7SJacob Keller } 21910deb0bf7SJacob Keller 21920deb0bf7SJacob Keller /** 21930deb0bf7SJacob Keller * ice_vc_isvalid_q_id 21940deb0bf7SJacob Keller * @vf: pointer to the VF info 21950deb0bf7SJacob Keller * @vsi_id: VSI ID 21960deb0bf7SJacob Keller * @qid: VSI relative queue ID 21970deb0bf7SJacob Keller * 21980deb0bf7SJacob Keller * check for the valid queue ID 21990deb0bf7SJacob Keller */ 22000deb0bf7SJacob Keller static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) 22010deb0bf7SJacob Keller { 22020deb0bf7SJacob Keller struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); 22030deb0bf7SJacob Keller /* allocated Tx and Rx queues should be always equal for VF VSI */ 22040deb0bf7SJacob Keller return (vsi && (qid < vsi->alloc_txq)); 22050deb0bf7SJacob Keller } 22060deb0bf7SJacob Keller 22070deb0bf7SJacob Keller /** 22080deb0bf7SJacob Keller * ice_vc_isvalid_ring_len 22090deb0bf7SJacob Keller * @ring_len: length of ring 22100deb0bf7SJacob Keller * 22110deb0bf7SJacob Keller * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE 22120deb0bf7SJacob Keller * or zero 22130deb0bf7SJacob Keller */ 22140deb0bf7SJacob Keller static bool ice_vc_isvalid_ring_len(u16 ring_len) 
{
	return ring_len == 0 ||
	       (ring_len >= ICE_MIN_NUM_DESC &&
		ring_len <= ICE_MAX_NUM_DESC &&
		!(ring_len % ICE_REQ_DESC_MULTIPLE));
}

/**
 * ice_vc_validate_pattern
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 *
 * validate the pattern is supported or not.
 *
 * Return: true on success, false on error.
 */
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
{
	bool is_ipv4 = false;
	bool is_ipv6 = false;
	bool is_udp = false;
	u16 ptype = -1;
	int i = 0;

	/* walk the header list in order, refining ptype as outer headers are
	 * seen; tunnel/ESP/AH/PFCP cases finalize ptype and jump to out
	 */
	while (i < proto->count &&
	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
		switch (proto->proto_hdr[i].type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			ptype = ICE_PTYPE_MAC_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			ptype = ICE_PTYPE_IPV4_PAY;
			is_ipv4 = true;
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ptype = ICE_PTYPE_IPV6_PAY;
			is_ipv6 = true;
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_UDP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_UDP_PAY;
			is_udp = true;
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_TCP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_TCP_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			if (is_ipv4)
				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
			else if (is_ipv6)
				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_GTPU;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_GTPU;
			goto out;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_L2TPV3;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_L2TPV3;
			goto out;
		case VIRTCHNL_PROTO_HDR_ESP:
			/* a preceding UDP header selects NAT-traversal ESP */
			if (is_ipv4)
				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
						 ICE_MAC_IPV4_ESP;
			else if (is_ipv6)
				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
						 ICE_MAC_IPV6_ESP;
			goto out;
		case VIRTCHNL_PROTO_HDR_AH:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_AH;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_AH;
			goto out;
		case VIRTCHNL_PROTO_HDR_PFCP:
			if (is_ipv4)
				ptype = ICE_MAC_IPV4_PFCP_SESSION;
			else if (is_ipv6)
				ptype = ICE_MAC_IPV6_PFCP_SESSION;
			goto out;
		default:
			break;
		}
		i++;
	}

out:
	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
}

/**
 * ice_vc_parse_rss_cfg - parses hash fields and headers from
 * a specific virtchnl RSS cfg
 * @hw: pointer to the hardware
 * @rss_cfg: pointer to the virtchnl RSS cfg
 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
 * to configure
 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
 *
 * Return true if all the protocol header and hash fields in the RSS cfg could
 * be parsed, else return false
 *
 * This function parses the virtchnl RSS cfg to be the intended
 * hash fields and the intended header for RSS configuration
 */
static bool
ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
		     u32 *addl_hdrs, u64 *hash_flds)
{
	const struct ice_vc_hash_field_match_type *hf_list;
	const struct ice_vc_hdr_match_type *hdr_list;
	int i, hf_list_len, hdr_list_len;

	hf_list = ice_vc_hash_field_list;
	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
	hdr_list = ice_vc_hdr_list;
	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);

	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
		struct virtchnl_proto_hdr *proto_hdr =
			&rss_cfg->proto_hdrs.proto_hdr[i];
		bool hdr_found = false;
		int j;

		/* Find matched ice headers according to virtchnl headers. */
		for (j = 0; j < hdr_list_len; j++) {
			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];

			if (proto_hdr->type == hdr_map.vc_hdr) {
				*addl_hdrs |= hdr_map.ice_hdr;
				hdr_found = true;
			}
		}

		/* every virtchnl header must map to at least one ice header */
		if (!hdr_found)
			return false;

		/* Find matched ice hash fields according to
		 * virtchnl hash fields.
		 */
		for (j = 0; j < hf_list_len; j++) {
			struct ice_vc_hash_field_match_type hf_map = hf_list[j];

			if (proto_hdr->type == hf_map.vc_hdr &&
			    proto_hdr->field_selector == hf_map.vc_hash_field) {
				*hash_flds |= hf_map.ice_hash_field;
				break;
			}
		}
	}

	return true;
}

/**
 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
 * RSS offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
 * else return false
 */
static bool ice_vf_adv_rss_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
}

/**
 * ice_vc_handle_rss_cfg
 * @vf: pointer to the VF info
 * @msg: pointer to the message buffer
 * @add: add a RSS config if true, otherwise delete a RSS config
 *
 * This function adds/deletes a RSS config
 */
static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
{
	u32 v_opcode = add ?
VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG; 24040deb0bf7SJacob Keller struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg; 24050deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 24060deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 24070deb0bf7SJacob Keller struct ice_hw *hw = &vf->pf->hw; 24080deb0bf7SJacob Keller struct ice_vsi *vsi; 24090deb0bf7SJacob Keller 24100deb0bf7SJacob Keller if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 24110deb0bf7SJacob Keller dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", 24120deb0bf7SJacob Keller vf->vf_id); 24130deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 24140deb0bf7SJacob Keller goto error_param; 24150deb0bf7SJacob Keller } 24160deb0bf7SJacob Keller 24170deb0bf7SJacob Keller if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) { 24180deb0bf7SJacob Keller dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n", 24190deb0bf7SJacob Keller vf->vf_id); 24200deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24210deb0bf7SJacob Keller goto error_param; 24220deb0bf7SJacob Keller } 24230deb0bf7SJacob Keller 24240deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 24250deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24260deb0bf7SJacob Keller goto error_param; 24270deb0bf7SJacob Keller } 24280deb0bf7SJacob Keller 24290deb0bf7SJacob Keller if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS || 24300deb0bf7SJacob Keller rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC || 24310deb0bf7SJacob Keller rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) { 24320deb0bf7SJacob Keller dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n", 24330deb0bf7SJacob Keller vf->vf_id); 24340deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24350deb0bf7SJacob Keller goto error_param; 
24360deb0bf7SJacob Keller } 24370deb0bf7SJacob Keller 24380deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 24390deb0bf7SJacob Keller if (!vsi) { 24400deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24410deb0bf7SJacob Keller goto error_param; 24420deb0bf7SJacob Keller } 24430deb0bf7SJacob Keller 24440deb0bf7SJacob Keller if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { 24450deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24460deb0bf7SJacob Keller goto error_param; 24470deb0bf7SJacob Keller } 24480deb0bf7SJacob Keller 24490deb0bf7SJacob Keller if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { 24500deb0bf7SJacob Keller struct ice_vsi_ctx *ctx; 24510deb0bf7SJacob Keller u8 lut_type, hash_type; 24520deb0bf7SJacob Keller int status; 24530deb0bf7SJacob Keller 24540deb0bf7SJacob Keller lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 24550deb0bf7SJacob Keller hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR : 24560deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 24570deb0bf7SJacob Keller 24580deb0bf7SJacob Keller ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 24590deb0bf7SJacob Keller if (!ctx) { 24600deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 24610deb0bf7SJacob Keller goto error_param; 24620deb0bf7SJacob Keller } 24630deb0bf7SJacob Keller 24640deb0bf7SJacob Keller ctx->info.q_opt_rss = ((lut_type << 24650deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 24660deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 24670deb0bf7SJacob Keller (hash_type & 24680deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_HASH_M); 24690deb0bf7SJacob Keller 24700deb0bf7SJacob Keller /* Preserve existing queueing option setting */ 24710deb0bf7SJacob Keller ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & 24720deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); 24730deb0bf7SJacob Keller ctx->info.q_opt_tc = vsi->info.q_opt_tc; 24740deb0bf7SJacob Keller ctx->info.q_opt_flags = vsi->info.q_opt_rss; 24750deb0bf7SJacob Keller 24760deb0bf7SJacob Keller ctx->info.valid_sections 
= 24770deb0bf7SJacob Keller cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 24780deb0bf7SJacob Keller 24790deb0bf7SJacob Keller status = ice_update_vsi(hw, vsi->idx, ctx, NULL); 24800deb0bf7SJacob Keller if (status) { 24810deb0bf7SJacob Keller dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", 24820deb0bf7SJacob Keller status, ice_aq_str(hw->adminq.sq_last_status)); 24830deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24840deb0bf7SJacob Keller } else { 24850deb0bf7SJacob Keller vsi->info.q_opt_rss = ctx->info.q_opt_rss; 24860deb0bf7SJacob Keller } 24870deb0bf7SJacob Keller 24880deb0bf7SJacob Keller kfree(ctx); 24890deb0bf7SJacob Keller } else { 24900deb0bf7SJacob Keller u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; 24910deb0bf7SJacob Keller u64 hash_flds = ICE_HASH_INVALID; 24920deb0bf7SJacob Keller 24930deb0bf7SJacob Keller if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, 24940deb0bf7SJacob Keller &hash_flds)) { 24950deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24960deb0bf7SJacob Keller goto error_param; 24970deb0bf7SJacob Keller } 24980deb0bf7SJacob Keller 24990deb0bf7SJacob Keller if (add) { 25000deb0bf7SJacob Keller if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, 25010deb0bf7SJacob Keller addl_hdrs)) { 25020deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 25030deb0bf7SJacob Keller dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", 25040deb0bf7SJacob Keller vsi->vsi_num, v_ret); 25050deb0bf7SJacob Keller } 25060deb0bf7SJacob Keller } else { 25070deb0bf7SJacob Keller int status; 25080deb0bf7SJacob Keller 25090deb0bf7SJacob Keller status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, 25100deb0bf7SJacob Keller addl_hdrs); 25110deb0bf7SJacob Keller /* We just ignore -ENOENT, because if two configurations 25120deb0bf7SJacob Keller * share the same profile remove one of them actually 25130deb0bf7SJacob Keller * removes both, since the profile is deleted. 
			 */
			if (status && status != -ENOENT) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
					vf->vf_id, status);
			}
		}
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
}

/**
 * ice_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 *
 * Return: result of sending the virtchnl response to the VF.
 */
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_vsi *vsi;

	/* reject unless the VF is active and references a VSI it owns */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* key_len comes from the (untrusted) VF; only the exact HW key size
	 * is acceptable
	 */
	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss_key(vsi, vrk->key))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 *
 * Return: result of sending the virtchnl response to the VF.
 */
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* LUT size is fixed by HW; any other size from the VF is invalid */
	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
				     NULL, 0);
}

/**
 * ice_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);
26380deb0bf7SJacob Keller 26390deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 26400deb0bf7SJacob Keller if (!vf) 26410deb0bf7SJacob Keller return -EINVAL; 26420deb0bf7SJacob Keller 26430deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 26440deb0bf7SJacob Keller if (ret) 26450deb0bf7SJacob Keller goto out_put_vf; 26460deb0bf7SJacob Keller 26470deb0bf7SJacob Keller vf_vsi = ice_get_vf_vsi(vf); 26480deb0bf7SJacob Keller if (!vf_vsi) { 26490deb0bf7SJacob Keller netdev_err(netdev, "VSI %d for VF %d is null\n", 26500deb0bf7SJacob Keller vf->lan_vsi_idx, vf->vf_id); 26510deb0bf7SJacob Keller ret = -EINVAL; 26520deb0bf7SJacob Keller goto out_put_vf; 26530deb0bf7SJacob Keller } 26540deb0bf7SJacob Keller 26550deb0bf7SJacob Keller if (vf_vsi->type != ICE_VSI_VF) { 26560deb0bf7SJacob Keller netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", 26570deb0bf7SJacob Keller vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); 26580deb0bf7SJacob Keller ret = -ENODEV; 26590deb0bf7SJacob Keller goto out_put_vf; 26600deb0bf7SJacob Keller } 26610deb0bf7SJacob Keller 26620deb0bf7SJacob Keller if (ena == vf->spoofchk) { 26630deb0bf7SJacob Keller dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); 26640deb0bf7SJacob Keller ret = 0; 26650deb0bf7SJacob Keller goto out_put_vf; 26660deb0bf7SJacob Keller } 26670deb0bf7SJacob Keller 2668a8ea6d86SJacob Keller ret = ice_vsi_apply_spoofchk(vf_vsi, ena); 26690deb0bf7SJacob Keller if (ret) 26700deb0bf7SJacob Keller dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n", 26710deb0bf7SJacob Keller ena ? 
"ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); 26720deb0bf7SJacob Keller else 26730deb0bf7SJacob Keller vf->spoofchk = ena; 26740deb0bf7SJacob Keller 26750deb0bf7SJacob Keller out_put_vf: 26760deb0bf7SJacob Keller ice_put_vf(vf); 26770deb0bf7SJacob Keller return ret; 26780deb0bf7SJacob Keller } 26790deb0bf7SJacob Keller 26800deb0bf7SJacob Keller /** 26810deb0bf7SJacob Keller * ice_vc_cfg_promiscuous_mode_msg 26820deb0bf7SJacob Keller * @vf: pointer to the VF info 26830deb0bf7SJacob Keller * @msg: pointer to the msg buffer 26840deb0bf7SJacob Keller * 26850deb0bf7SJacob Keller * called from the VF to configure VF VSIs promiscuous mode 26860deb0bf7SJacob Keller */ 26870deb0bf7SJacob Keller static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) 26880deb0bf7SJacob Keller { 26890deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 26900deb0bf7SJacob Keller bool rm_promisc, alluni = false, allmulti = false; 26910deb0bf7SJacob Keller struct virtchnl_promisc_info *info = 26920deb0bf7SJacob Keller (struct virtchnl_promisc_info *)msg; 26930deb0bf7SJacob Keller struct ice_vsi_vlan_ops *vlan_ops; 26940deb0bf7SJacob Keller int mcast_err = 0, ucast_err = 0; 26950deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 26960deb0bf7SJacob Keller struct ice_vsi *vsi; 26970deb0bf7SJacob Keller struct device *dev; 26980deb0bf7SJacob Keller int ret = 0; 26990deb0bf7SJacob Keller 27000deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 27010deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 27020deb0bf7SJacob Keller goto error_param; 27030deb0bf7SJacob Keller } 27040deb0bf7SJacob Keller 27050deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) { 27060deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 27070deb0bf7SJacob Keller goto error_param; 27080deb0bf7SJacob Keller } 27090deb0bf7SJacob Keller 27100deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 27110deb0bf7SJacob Keller if (!vsi) { 27120deb0bf7SJacob 
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	dev = ice_pf_to_dev(pf);
	if (!ice_is_vf_trusted(vf)) {
		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Leave v_ret alone, lie to the VF on purpose. */
		goto error_param;
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;

	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	/* VF requested neither unicast nor multicast promiscuous mode, so
	 * promiscuous filters must be removed and VLAN pruning re-enabled
	 */
	rm_promisc = !allmulti && !alluni;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	if (rm_promisc)
		ret = vlan_ops->ena_rx_filtering(vsi);
	else
		ret = vlan_ops->dis_rx_filtering(vsi);
	if (ret) {
		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
		bool set_dflt_vsi = alluni || allmulti;

		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
			/* only attempt to set the default forwarding VSI if
			 * it's not currently set
			 */
			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
		else if (!set_dflt_vsi &&
			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
			/* only attempt to free the default forwarding VSI if we
			 * are the owner
			 */
			ret = ice_clear_dflt_vsi(pf->first_sw);

		if (ret) {
			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto error_param;
		}
	} else {
		u8 mcast_m, ucast_m;

		/* promiscuous filter bits differ when a VLAN is involved */
		if (ice_vf_is_port_vlan_ena(vf) ||
		    ice_vsi_has_non_zero_vlans(vsi)) {
			mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
			ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
		} else {
			mcast_m = ICE_MCAST_PROMISC_BITS;
			ucast_m = ICE_UCAST_PROMISC_BITS;
		}

		if (alluni)
			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
		else
			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);

		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ucast_err || mcast_err)
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	/* track promiscuous state in vf_states; the test_and_* calls make the
	 * dev_info fire only on an actual state transition
	 */
	if (!mcast_err) {
		if (allmulti &&
		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
				 vf->vf_id);
		else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
				 vf->vf_id);
	}

	if (!ucast_err) {
		if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
				 vf->vf_id);
		else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
				 vf->vf_id);
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				     v_ret, NULL, 0);
}

/**
 * ice_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get VSI stats
 */
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct
virtchnl_queue_select *)msg;
	struct ice_eth_stats stats = { 0 };
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* refresh HW counters before copying them out to the VF */
	ice_update_eth_stats(vsi);

	stats = vsi->eth_stats;

error_param:
	/* send the response to the VF; stats stays zeroed on any error */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
}

/**
 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Both bitmaps must fit in ICE_MAX_RSS_QS_PER_VF bits and at least one of
 * them must be non-empty.
 *
 * Return true on successful validation, else false
 */
static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
		return false;

	return true;
}

/**
 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 pfq = vsi->txq_map[q_idx];
	u32 reg;

	reg = rd32(hw, QINT_TQCTL(pfq));

	/* MSI-X index 0 in the VF's space is always for the OICR, which means
	 * this is most likely a poll mode VF driver, so don't enable an
	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
	 */
	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
		return;

	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
}

/**
 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 pfq = vsi->rxq_map[q_idx];
	u32 reg;

	reg = rd32(hw, QINT_RQCTL(pfq));

	/* MSI-X index 0 in the VF's space is always for the OICR, which means
	 * this is most likely a poll mode VF driver, so don't enable an
	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
	 */
	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
		return;

	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
}

/**
 * ice_vc_ena_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))
			continue;

		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->rxq_ena);
	}

	/* Tx side only needs its interrupt cause enabled and bookkeeping */
	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->txq_ena))
			continue;

		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
		set_bit(vf_q_id, vf->txq_ena);
	}

	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_dis_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific
 * queue(s)
 */
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vqs->tx_queues) {
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
			struct ice_txq_meta txq_meta = { 0 };

			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->txq_ena))
				continue;

			ice_fill_txq_meta(vsi, ring, &txq_meta);

			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
						 ring, &txq_meta)) {
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->txq_ena);
		}
	}

	q_map = vqs->rx_queues;
	/* speed up Rx queue disable by batching them if possible */
	if (q_map &&
	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
		/* request covers exactly the set of enabled Rx queues, so
		 * all rings can be stopped in one shot
		 */
		if (ice_vsi_stop_all_rx_rings(vsi)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
				vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	} else if (q_map) {
		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->rxq_ena))
				continue;

			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
						     true)) {
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->rxq_ena);
		}
	}

	/* Clear enabled queues flag */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
				     NULL, 0);
}

/**
 * ice_cfg_interrupt
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @vector_id: vector ID
 * @map: vector map for mapping vectors to queues
 * @q_vector: structure for interrupt vector
 * configure the IRQ to queue map
 */
static int
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
		  struct virtchnl_vector_map *map,
		  struct ice_q_vector *q_vector)
{
	u16 vsi_q_id, vsi_q_id_idx;
	unsigned long qmap;

	q_vector->num_ring_rx = 0;
	q_vector->num_ring_tx =
0;

	qmap = map->rxq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_rx++;
		q_vector->rx.itr_idx = map->rxitr_idx;
		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->rx.itr_idx);
	}

	qmap = map->txq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_tx++;
		q_vector->tx.itr_idx = map->txitr_idx;
		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->tx.itr_idx);
	}

	return VIRTCHNL_STATUS_SUCCESS;
}

/**
 * ice_vc_cfg_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the IRQ to queue map
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u16 num_q_vectors_mapped, vsi_id, vector_id;
	struct virtchnl_irq_map_info *irqmap_info;
	struct virtchnl_vector_map *map;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int i;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and check that
	 * there is actually at least a single VF queue vector mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    pf->vfs.num_msix_per < num_q_vectors_mapped ||
	    !num_q_vectors_mapped) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF
		 */
		if (!(vector_id < pf->vfs.num_msix_per) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* No need to map VF miscellaneous or rogue vector */
		if (!vector_id)
			continue;

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		v_ret = (enum virtchnl_status_code)
			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
		if (v_ret)
			goto error_param;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_cfg_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the
msg buffer 32560deb0bf7SJacob Keller * 32570deb0bf7SJacob Keller * called from the VF to configure the Rx/Tx queues 32580deb0bf7SJacob Keller */ 32590deb0bf7SJacob Keller static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) 32600deb0bf7SJacob Keller { 32610deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 32620deb0bf7SJacob Keller struct virtchnl_vsi_queue_config_info *qci = 32630deb0bf7SJacob Keller (struct virtchnl_vsi_queue_config_info *)msg; 32640deb0bf7SJacob Keller struct virtchnl_queue_pair_info *qpi; 32650deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 32660deb0bf7SJacob Keller struct ice_vsi *vsi; 32670deb0bf7SJacob Keller int i, q_idx; 32680deb0bf7SJacob Keller 32690deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 32700deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32710deb0bf7SJacob Keller goto error_param; 32720deb0bf7SJacob Keller } 32730deb0bf7SJacob Keller 32740deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 32750deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32760deb0bf7SJacob Keller goto error_param; 32770deb0bf7SJacob Keller } 32780deb0bf7SJacob Keller 32790deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 32800deb0bf7SJacob Keller if (!vsi) { 32810deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32820deb0bf7SJacob Keller goto error_param; 32830deb0bf7SJacob Keller } 32840deb0bf7SJacob Keller 32850deb0bf7SJacob Keller if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || 32860deb0bf7SJacob Keller qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { 32870deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", 32880deb0bf7SJacob Keller vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); 32890deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32900deb0bf7SJacob Keller goto error_param; 32910deb0bf7SJacob Keller } 32920deb0bf7SJacob Keller 32930deb0bf7SJacob Keller for (i = 
0; i < qci->num_queue_pairs; i++) { 32940deb0bf7SJacob Keller qpi = &qci->qpair[i]; 32950deb0bf7SJacob Keller if (qpi->txq.vsi_id != qci->vsi_id || 32960deb0bf7SJacob Keller qpi->rxq.vsi_id != qci->vsi_id || 32970deb0bf7SJacob Keller qpi->rxq.queue_id != qpi->txq.queue_id || 32980deb0bf7SJacob Keller qpi->txq.headwb_enabled || 32990deb0bf7SJacob Keller !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || 33000deb0bf7SJacob Keller !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || 33010deb0bf7SJacob Keller !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { 33020deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33030deb0bf7SJacob Keller goto error_param; 33040deb0bf7SJacob Keller } 33050deb0bf7SJacob Keller 33060deb0bf7SJacob Keller q_idx = qpi->rxq.queue_id; 33070deb0bf7SJacob Keller 33080deb0bf7SJacob Keller /* make sure selected "q_idx" is in valid range of queues 33090deb0bf7SJacob Keller * for selected "vsi" 33100deb0bf7SJacob Keller */ 33110deb0bf7SJacob Keller if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { 33120deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33130deb0bf7SJacob Keller goto error_param; 33140deb0bf7SJacob Keller } 33150deb0bf7SJacob Keller 33160deb0bf7SJacob Keller /* copy Tx queue info from VF into VSI */ 33170deb0bf7SJacob Keller if (qpi->txq.ring_len > 0) { 33180deb0bf7SJacob Keller vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; 33190deb0bf7SJacob Keller vsi->tx_rings[i]->count = qpi->txq.ring_len; 33200deb0bf7SJacob Keller if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { 33210deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33220deb0bf7SJacob Keller goto error_param; 33230deb0bf7SJacob Keller } 33240deb0bf7SJacob Keller } 33250deb0bf7SJacob Keller 33260deb0bf7SJacob Keller /* copy Rx queue info from VF into VSI */ 33270deb0bf7SJacob Keller if (qpi->rxq.ring_len > 0) { 33280deb0bf7SJacob Keller u16 max_frame_size = ice_vc_get_max_frame_size(vf); 33290deb0bf7SJacob Keller 33300deb0bf7SJacob Keller 
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; 33310deb0bf7SJacob Keller vsi->rx_rings[i]->count = qpi->rxq.ring_len; 33320deb0bf7SJacob Keller 33330deb0bf7SJacob Keller if (qpi->rxq.databuffer_size != 0 && 33340deb0bf7SJacob Keller (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || 33350deb0bf7SJacob Keller qpi->rxq.databuffer_size < 1024)) { 33360deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33370deb0bf7SJacob Keller goto error_param; 33380deb0bf7SJacob Keller } 33390deb0bf7SJacob Keller vsi->rx_buf_len = qpi->rxq.databuffer_size; 33400deb0bf7SJacob Keller vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; 33410deb0bf7SJacob Keller if (qpi->rxq.max_pkt_size > max_frame_size || 33420deb0bf7SJacob Keller qpi->rxq.max_pkt_size < 64) { 33430deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33440deb0bf7SJacob Keller goto error_param; 33450deb0bf7SJacob Keller } 33460deb0bf7SJacob Keller 33470deb0bf7SJacob Keller vsi->max_frame = qpi->rxq.max_pkt_size; 33480deb0bf7SJacob Keller /* add space for the port VLAN since the VF driver is not 33490deb0bf7SJacob Keller * expected to account for it in the MTU calculation 33500deb0bf7SJacob Keller */ 33510deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 33520deb0bf7SJacob Keller vsi->max_frame += VLAN_HLEN; 33530deb0bf7SJacob Keller 33540deb0bf7SJacob Keller if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { 33550deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33560deb0bf7SJacob Keller goto error_param; 33570deb0bf7SJacob Keller } 33580deb0bf7SJacob Keller } 33590deb0bf7SJacob Keller } 33600deb0bf7SJacob Keller 33610deb0bf7SJacob Keller error_param: 33620deb0bf7SJacob Keller /* send the response to the VF */ 33630deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, 33640deb0bf7SJacob Keller NULL, 0); 33650deb0bf7SJacob Keller } 33660deb0bf7SJacob Keller 33670deb0bf7SJacob Keller /** 33680deb0bf7SJacob Keller * ice_can_vf_change_mac 33690deb0bf7SJacob Keller * @vf: pointer 
to the VF info 33700deb0bf7SJacob Keller * 33710deb0bf7SJacob Keller * Return true if the VF is allowed to change its MAC filters, false otherwise 33720deb0bf7SJacob Keller */ 33730deb0bf7SJacob Keller static bool ice_can_vf_change_mac(struct ice_vf *vf) 33740deb0bf7SJacob Keller { 33750deb0bf7SJacob Keller /* If the VF MAC address has been set administratively (via the 33760deb0bf7SJacob Keller * ndo_set_vf_mac command), then deny permission to the VF to 33770deb0bf7SJacob Keller * add/delete unicast MAC addresses, unless the VF is trusted 33780deb0bf7SJacob Keller */ 33790deb0bf7SJacob Keller if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) 33800deb0bf7SJacob Keller return false; 33810deb0bf7SJacob Keller 33820deb0bf7SJacob Keller return true; 33830deb0bf7SJacob Keller } 33840deb0bf7SJacob Keller 33850deb0bf7SJacob Keller /** 33860deb0bf7SJacob Keller * ice_vc_ether_addr_type - get type of virtchnl_ether_addr 33870deb0bf7SJacob Keller * @vc_ether_addr: used to extract the type 33880deb0bf7SJacob Keller */ 33890deb0bf7SJacob Keller static u8 33900deb0bf7SJacob Keller ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) 33910deb0bf7SJacob Keller { 33920deb0bf7SJacob Keller return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK); 33930deb0bf7SJacob Keller } 33940deb0bf7SJacob Keller 33950deb0bf7SJacob Keller /** 33960deb0bf7SJacob Keller * ice_is_vc_addr_legacy - check if the MAC address is from an older VF 33970deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 33980deb0bf7SJacob Keller */ 33990deb0bf7SJacob Keller static bool 34000deb0bf7SJacob Keller ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) 34010deb0bf7SJacob Keller { 34020deb0bf7SJacob Keller u8 type = ice_vc_ether_addr_type(vc_ether_addr); 34030deb0bf7SJacob Keller 34040deb0bf7SJacob Keller return (type == VIRTCHNL_ETHER_ADDR_LEGACY); 34050deb0bf7SJacob Keller } 34060deb0bf7SJacob Keller 34070deb0bf7SJacob Keller /** 34080deb0bf7SJacob 
Keller * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC 34090deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 34100deb0bf7SJacob Keller * 34110deb0bf7SJacob Keller * This function should only be called when the MAC address in 34120deb0bf7SJacob Keller * virtchnl_ether_addr is a valid unicast MAC 34130deb0bf7SJacob Keller */ 34140deb0bf7SJacob Keller static bool 34150deb0bf7SJacob Keller ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr) 34160deb0bf7SJacob Keller { 34170deb0bf7SJacob Keller u8 type = ice_vc_ether_addr_type(vc_ether_addr); 34180deb0bf7SJacob Keller 34190deb0bf7SJacob Keller return (type == VIRTCHNL_ETHER_ADDR_PRIMARY); 34200deb0bf7SJacob Keller } 34210deb0bf7SJacob Keller 34220deb0bf7SJacob Keller /** 34230deb0bf7SJacob Keller * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed 34240deb0bf7SJacob Keller * @vf: VF to update 34250deb0bf7SJacob Keller * @vc_ether_addr: structure from VIRTCHNL with MAC to add 34260deb0bf7SJacob Keller */ 34270deb0bf7SJacob Keller static void 34280deb0bf7SJacob Keller ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) 34290deb0bf7SJacob Keller { 34300deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 34310deb0bf7SJacob Keller 34320deb0bf7SJacob Keller if (!is_valid_ether_addr(mac_addr)) 34330deb0bf7SJacob Keller return; 34340deb0bf7SJacob Keller 34350deb0bf7SJacob Keller /* only allow legacy VF drivers to set the device and hardware MAC if it 34360deb0bf7SJacob Keller * is zero and allow new VF drivers to set the hardware MAC if the type 34370deb0bf7SJacob Keller * was correctly specified over VIRTCHNL 34380deb0bf7SJacob Keller */ 34390deb0bf7SJacob Keller if ((ice_is_vc_addr_legacy(vc_ether_addr) && 34400deb0bf7SJacob Keller is_zero_ether_addr(vf->hw_lan_addr.addr)) || 34410deb0bf7SJacob Keller ice_is_vc_addr_primary(vc_ether_addr)) { 34420deb0bf7SJacob Keller 
ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); 34430deb0bf7SJacob Keller ether_addr_copy(vf->hw_lan_addr.addr, mac_addr); 34440deb0bf7SJacob Keller } 34450deb0bf7SJacob Keller 34460deb0bf7SJacob Keller /* hardware and device MACs are already set, but its possible that the 34470deb0bf7SJacob Keller * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the 34480deb0bf7SJacob Keller * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it 34490deb0bf7SJacob Keller * away for the legacy VF driver case as it will be updated in the 34500deb0bf7SJacob Keller * delete flow for this case 34510deb0bf7SJacob Keller */ 34520deb0bf7SJacob Keller if (ice_is_vc_addr_legacy(vc_ether_addr)) { 34530deb0bf7SJacob Keller ether_addr_copy(vf->legacy_last_added_umac.addr, 34540deb0bf7SJacob Keller mac_addr); 34550deb0bf7SJacob Keller vf->legacy_last_added_umac.time_modified = jiffies; 34560deb0bf7SJacob Keller } 34570deb0bf7SJacob Keller } 34580deb0bf7SJacob Keller 34590deb0bf7SJacob Keller /** 34600deb0bf7SJacob Keller * ice_vc_add_mac_addr - attempt to add the MAC address passed in 34610deb0bf7SJacob Keller * @vf: pointer to the VF info 34620deb0bf7SJacob Keller * @vsi: pointer to the VF's VSI 34630deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC 34640deb0bf7SJacob Keller */ 34650deb0bf7SJacob Keller static int 34660deb0bf7SJacob Keller ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, 34670deb0bf7SJacob Keller struct virtchnl_ether_addr *vc_ether_addr) 34680deb0bf7SJacob Keller { 34690deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 34700deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 34710deb0bf7SJacob Keller int ret; 34720deb0bf7SJacob Keller 34730deb0bf7SJacob Keller /* device MAC already added */ 34740deb0bf7SJacob Keller if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) 34750deb0bf7SJacob Keller return 0; 34760deb0bf7SJacob Keller 34770deb0bf7SJacob Keller if 
(is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { 34780deb0bf7SJacob Keller dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 34790deb0bf7SJacob Keller return -EPERM; 34800deb0bf7SJacob Keller } 34810deb0bf7SJacob Keller 34820deb0bf7SJacob Keller ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 34830deb0bf7SJacob Keller if (ret == -EEXIST) { 34840deb0bf7SJacob Keller dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, 34850deb0bf7SJacob Keller vf->vf_id); 34860deb0bf7SJacob Keller /* don't return since we might need to update 34870deb0bf7SJacob Keller * the primary MAC in ice_vfhw_mac_add() below 34880deb0bf7SJacob Keller */ 34890deb0bf7SJacob Keller } else if (ret) { 34900deb0bf7SJacob Keller dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", 34910deb0bf7SJacob Keller mac_addr, vf->vf_id, ret); 34920deb0bf7SJacob Keller return ret; 34930deb0bf7SJacob Keller } else { 34940deb0bf7SJacob Keller vf->num_mac++; 34950deb0bf7SJacob Keller } 34960deb0bf7SJacob Keller 34970deb0bf7SJacob Keller ice_vfhw_mac_add(vf, vc_ether_addr); 34980deb0bf7SJacob Keller 34990deb0bf7SJacob Keller return ret; 35000deb0bf7SJacob Keller } 35010deb0bf7SJacob Keller 35020deb0bf7SJacob Keller /** 35030deb0bf7SJacob Keller * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired 35040deb0bf7SJacob Keller * @last_added_umac: structure used to check expiration 35050deb0bf7SJacob Keller */ 35060deb0bf7SJacob Keller static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) 35070deb0bf7SJacob Keller { 35080deb0bf7SJacob Keller #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000) 35090deb0bf7SJacob Keller return time_is_before_jiffies(last_added_umac->time_modified + 35100deb0bf7SJacob Keller ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME); 35110deb0bf7SJacob Keller } 35120deb0bf7SJacob Keller 35130deb0bf7SJacob Keller /** 
35140deb0bf7SJacob Keller * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF 35150deb0bf7SJacob Keller * @vf: VF to update 35160deb0bf7SJacob Keller * @vc_ether_addr: structure from VIRTCHNL with MAC to check 35170deb0bf7SJacob Keller * 35180deb0bf7SJacob Keller * only update cached hardware MAC for legacy VF drivers on delete 35190deb0bf7SJacob Keller * because we cannot guarantee order/type of MAC from the VF driver 35200deb0bf7SJacob Keller */ 35210deb0bf7SJacob Keller static void 35220deb0bf7SJacob Keller ice_update_legacy_cached_mac(struct ice_vf *vf, 35230deb0bf7SJacob Keller struct virtchnl_ether_addr *vc_ether_addr) 35240deb0bf7SJacob Keller { 35250deb0bf7SJacob Keller if (!ice_is_vc_addr_legacy(vc_ether_addr) || 35260deb0bf7SJacob Keller ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) 35270deb0bf7SJacob Keller return; 35280deb0bf7SJacob Keller 35290deb0bf7SJacob Keller ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); 35300deb0bf7SJacob Keller ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); 35310deb0bf7SJacob Keller } 35320deb0bf7SJacob Keller 35330deb0bf7SJacob Keller /** 35340deb0bf7SJacob Keller * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed 35350deb0bf7SJacob Keller * @vf: VF to update 35360deb0bf7SJacob Keller * @vc_ether_addr: structure from VIRTCHNL with MAC to delete 35370deb0bf7SJacob Keller */ 35380deb0bf7SJacob Keller static void 35390deb0bf7SJacob Keller ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) 35400deb0bf7SJacob Keller { 35410deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 35420deb0bf7SJacob Keller 35430deb0bf7SJacob Keller if (!is_valid_ether_addr(mac_addr) || 35440deb0bf7SJacob Keller !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) 35450deb0bf7SJacob Keller return; 35460deb0bf7SJacob Keller 35470deb0bf7SJacob Keller /* allow the device MAC to be repopulated in the add flow and don't 
35480deb0bf7SJacob Keller * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant 35490deb0bf7SJacob Keller * to be persistent on VM reboot and across driver unload/load, which 35500deb0bf7SJacob Keller * won't work if we clear the hardware MAC here 35510deb0bf7SJacob Keller */ 35520deb0bf7SJacob Keller eth_zero_addr(vf->dev_lan_addr.addr); 35530deb0bf7SJacob Keller 35540deb0bf7SJacob Keller ice_update_legacy_cached_mac(vf, vc_ether_addr); 35550deb0bf7SJacob Keller } 35560deb0bf7SJacob Keller 35570deb0bf7SJacob Keller /** 35580deb0bf7SJacob Keller * ice_vc_del_mac_addr - attempt to delete the MAC address passed in 35590deb0bf7SJacob Keller * @vf: pointer to the VF info 35600deb0bf7SJacob Keller * @vsi: pointer to the VF's VSI 35610deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC 35620deb0bf7SJacob Keller */ 35630deb0bf7SJacob Keller static int 35640deb0bf7SJacob Keller ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, 35650deb0bf7SJacob Keller struct virtchnl_ether_addr *vc_ether_addr) 35660deb0bf7SJacob Keller { 35670deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 35680deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 35690deb0bf7SJacob Keller int status; 35700deb0bf7SJacob Keller 35710deb0bf7SJacob Keller if (!ice_can_vf_change_mac(vf) && 35720deb0bf7SJacob Keller ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) 35730deb0bf7SJacob Keller return 0; 35740deb0bf7SJacob Keller 35750deb0bf7SJacob Keller status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 35760deb0bf7SJacob Keller if (status == -ENOENT) { 35770deb0bf7SJacob Keller dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, 35780deb0bf7SJacob Keller vf->vf_id); 35790deb0bf7SJacob Keller return -ENOENT; 35800deb0bf7SJacob Keller } else if (status) { 35810deb0bf7SJacob Keller dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", 35820deb0bf7SJacob Keller mac_addr, vf->vf_id, status); 
35830deb0bf7SJacob Keller return -EIO; 35840deb0bf7SJacob Keller } 35850deb0bf7SJacob Keller 35860deb0bf7SJacob Keller ice_vfhw_mac_del(vf, vc_ether_addr); 35870deb0bf7SJacob Keller 35880deb0bf7SJacob Keller vf->num_mac--; 35890deb0bf7SJacob Keller 35900deb0bf7SJacob Keller return 0; 35910deb0bf7SJacob Keller } 35920deb0bf7SJacob Keller 35930deb0bf7SJacob Keller /** 35940deb0bf7SJacob Keller * ice_vc_handle_mac_addr_msg 35950deb0bf7SJacob Keller * @vf: pointer to the VF info 35960deb0bf7SJacob Keller * @msg: pointer to the msg buffer 35970deb0bf7SJacob Keller * @set: true if MAC filters are being set, false otherwise 35980deb0bf7SJacob Keller * 35990deb0bf7SJacob Keller * add guest MAC address filter 36000deb0bf7SJacob Keller */ 36010deb0bf7SJacob Keller static int 36020deb0bf7SJacob Keller ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) 36030deb0bf7SJacob Keller { 36040deb0bf7SJacob Keller int (*ice_vc_cfg_mac) 36050deb0bf7SJacob Keller (struct ice_vf *vf, struct ice_vsi *vsi, 36060deb0bf7SJacob Keller struct virtchnl_ether_addr *virtchnl_ether_addr); 36070deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 36080deb0bf7SJacob Keller struct virtchnl_ether_addr_list *al = 36090deb0bf7SJacob Keller (struct virtchnl_ether_addr_list *)msg; 36100deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 36110deb0bf7SJacob Keller enum virtchnl_ops vc_op; 36120deb0bf7SJacob Keller struct ice_vsi *vsi; 36130deb0bf7SJacob Keller int i; 36140deb0bf7SJacob Keller 36150deb0bf7SJacob Keller if (set) { 36160deb0bf7SJacob Keller vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; 36170deb0bf7SJacob Keller ice_vc_cfg_mac = ice_vc_add_mac_addr; 36180deb0bf7SJacob Keller } else { 36190deb0bf7SJacob Keller vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; 36200deb0bf7SJacob Keller ice_vc_cfg_mac = ice_vc_del_mac_addr; 36210deb0bf7SJacob Keller } 36220deb0bf7SJacob Keller 36230deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 36240deb0bf7SJacob Keller 
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { 36250deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 36260deb0bf7SJacob Keller goto handle_mac_exit; 36270deb0bf7SJacob Keller } 36280deb0bf7SJacob Keller 36290deb0bf7SJacob Keller /* If this VF is not privileged, then we can't add more than a 36300deb0bf7SJacob Keller * limited number of addresses. Check to make sure that the 36310deb0bf7SJacob Keller * additions do not push us over the limit. 36320deb0bf7SJacob Keller */ 36330deb0bf7SJacob Keller if (set && !ice_is_vf_trusted(vf) && 36340deb0bf7SJacob Keller (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { 36350deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", 36360deb0bf7SJacob Keller vf->vf_id); 36370deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 36380deb0bf7SJacob Keller goto handle_mac_exit; 36390deb0bf7SJacob Keller } 36400deb0bf7SJacob Keller 36410deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 36420deb0bf7SJacob Keller if (!vsi) { 36430deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 36440deb0bf7SJacob Keller goto handle_mac_exit; 36450deb0bf7SJacob Keller } 36460deb0bf7SJacob Keller 36470deb0bf7SJacob Keller for (i = 0; i < al->num_elements; i++) { 36480deb0bf7SJacob Keller u8 *mac_addr = al->list[i].addr; 36490deb0bf7SJacob Keller int result; 36500deb0bf7SJacob Keller 36510deb0bf7SJacob Keller if (is_broadcast_ether_addr(mac_addr) || 36520deb0bf7SJacob Keller is_zero_ether_addr(mac_addr)) 36530deb0bf7SJacob Keller continue; 36540deb0bf7SJacob Keller 36550deb0bf7SJacob Keller result = ice_vc_cfg_mac(vf, vsi, &al->list[i]); 36560deb0bf7SJacob Keller if (result == -EEXIST || result == -ENOENT) { 36570deb0bf7SJacob Keller continue; 36580deb0bf7SJacob Keller } else if (result) { 36590deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 36600deb0bf7SJacob Keller goto handle_mac_exit; 
36610deb0bf7SJacob Keller } 36620deb0bf7SJacob Keller } 36630deb0bf7SJacob Keller 36640deb0bf7SJacob Keller handle_mac_exit: 36650deb0bf7SJacob Keller /* send the response to the VF */ 36660deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); 36670deb0bf7SJacob Keller } 36680deb0bf7SJacob Keller 36690deb0bf7SJacob Keller /** 36700deb0bf7SJacob Keller * ice_vc_add_mac_addr_msg 36710deb0bf7SJacob Keller * @vf: pointer to the VF info 36720deb0bf7SJacob Keller * @msg: pointer to the msg buffer 36730deb0bf7SJacob Keller * 36740deb0bf7SJacob Keller * add guest MAC address filter 36750deb0bf7SJacob Keller */ 36760deb0bf7SJacob Keller static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg) 36770deb0bf7SJacob Keller { 36780deb0bf7SJacob Keller return ice_vc_handle_mac_addr_msg(vf, msg, true); 36790deb0bf7SJacob Keller } 36800deb0bf7SJacob Keller 36810deb0bf7SJacob Keller /** 36820deb0bf7SJacob Keller * ice_vc_del_mac_addr_msg 36830deb0bf7SJacob Keller * @vf: pointer to the VF info 36840deb0bf7SJacob Keller * @msg: pointer to the msg buffer 36850deb0bf7SJacob Keller * 36860deb0bf7SJacob Keller * remove guest MAC address filter 36870deb0bf7SJacob Keller */ 36880deb0bf7SJacob Keller static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) 36890deb0bf7SJacob Keller { 36900deb0bf7SJacob Keller return ice_vc_handle_mac_addr_msg(vf, msg, false); 36910deb0bf7SJacob Keller } 36920deb0bf7SJacob Keller 36930deb0bf7SJacob Keller /** 36940deb0bf7SJacob Keller * ice_vc_request_qs_msg 36950deb0bf7SJacob Keller * @vf: pointer to the VF info 36960deb0bf7SJacob Keller * @msg: pointer to the msg buffer 36970deb0bf7SJacob Keller * 36980deb0bf7SJacob Keller * VFs get a default number of queues but can use this message to request a 36990deb0bf7SJacob Keller * different number. If the request is successful, PF will reset the VF and 37000deb0bf7SJacob Keller * return 0. 
If unsuccessful, PF will send message informing VF of number of 37010deb0bf7SJacob Keller * available queue pairs via virtchnl message response to VF. 37020deb0bf7SJacob Keller */ 37030deb0bf7SJacob Keller static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) 37040deb0bf7SJacob Keller { 37050deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 37060deb0bf7SJacob Keller struct virtchnl_vf_res_request *vfres = 37070deb0bf7SJacob Keller (struct virtchnl_vf_res_request *)msg; 37080deb0bf7SJacob Keller u16 req_queues = vfres->num_queue_pairs; 37090deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 37100deb0bf7SJacob Keller u16 max_allowed_vf_queues; 37110deb0bf7SJacob Keller u16 tx_rx_queue_left; 37120deb0bf7SJacob Keller struct device *dev; 37130deb0bf7SJacob Keller u16 cur_queues; 37140deb0bf7SJacob Keller 37150deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 37160deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 37170deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37180deb0bf7SJacob Keller goto error_param; 37190deb0bf7SJacob Keller } 37200deb0bf7SJacob Keller 37210deb0bf7SJacob Keller cur_queues = vf->num_vf_qs; 37220deb0bf7SJacob Keller tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), 37230deb0bf7SJacob Keller ice_get_avail_rxq_count(pf)); 37240deb0bf7SJacob Keller max_allowed_vf_queues = tx_rx_queue_left + cur_queues; 37250deb0bf7SJacob Keller if (!req_queues) { 37260deb0bf7SJacob Keller dev_err(dev, "VF %d tried to request 0 queues. 
Ignoring.\n", 37270deb0bf7SJacob Keller vf->vf_id); 37280deb0bf7SJacob Keller } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) { 37290deb0bf7SJacob Keller dev_err(dev, "VF %d tried to request more than %d queues.\n", 37300deb0bf7SJacob Keller vf->vf_id, ICE_MAX_RSS_QS_PER_VF); 37310deb0bf7SJacob Keller vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF; 37320deb0bf7SJacob Keller } else if (req_queues > cur_queues && 37330deb0bf7SJacob Keller req_queues - cur_queues > tx_rx_queue_left) { 37340deb0bf7SJacob Keller dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", 37350deb0bf7SJacob Keller vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); 37360deb0bf7SJacob Keller vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, 37370deb0bf7SJacob Keller ICE_MAX_RSS_QS_PER_VF); 37380deb0bf7SJacob Keller } else { 37390deb0bf7SJacob Keller /* request is successful, then reset VF */ 37400deb0bf7SJacob Keller vf->num_req_qs = req_queues; 37410deb0bf7SJacob Keller ice_vc_reset_vf(vf); 37420deb0bf7SJacob Keller dev_info(dev, "VF %d granted request of %u queues.\n", 37430deb0bf7SJacob Keller vf->vf_id, req_queues); 37440deb0bf7SJacob Keller return 0; 37450deb0bf7SJacob Keller } 37460deb0bf7SJacob Keller 37470deb0bf7SJacob Keller error_param: 37480deb0bf7SJacob Keller /* send the response to the VF */ 37490deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 37500deb0bf7SJacob Keller v_ret, (u8 *)vfres, sizeof(*vfres)); 37510deb0bf7SJacob Keller } 37520deb0bf7SJacob Keller 37530deb0bf7SJacob Keller /** 37540deb0bf7SJacob Keller * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads 37550deb0bf7SJacob Keller * @caps: VF driver negotiated capabilities 37560deb0bf7SJacob Keller * 37570deb0bf7SJacob Keller * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false 37580deb0bf7SJacob Keller */ 37590deb0bf7SJacob Keller static bool ice_vf_vlan_offload_ena(u32 caps) 37600deb0bf7SJacob Keller { 
37610deb0bf7SJacob Keller return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN); 37620deb0bf7SJacob Keller } 37630deb0bf7SJacob Keller 37640deb0bf7SJacob Keller /** 37650deb0bf7SJacob Keller * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed 37660deb0bf7SJacob Keller * @vf: VF used to determine if VLAN promiscuous config is allowed 37670deb0bf7SJacob Keller */ 37680deb0bf7SJacob Keller static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) 37690deb0bf7SJacob Keller { 37700deb0bf7SJacob Keller if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || 37710deb0bf7SJacob Keller test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) && 37720deb0bf7SJacob Keller test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags)) 37730deb0bf7SJacob Keller return true; 37740deb0bf7SJacob Keller 37750deb0bf7SJacob Keller return false; 37760deb0bf7SJacob Keller } 37770deb0bf7SJacob Keller 37780deb0bf7SJacob Keller /** 37790deb0bf7SJacob Keller * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN 37800deb0bf7SJacob Keller * @vsi: VF's VSI used to enable VLAN promiscuous mode 37810deb0bf7SJacob Keller * @vlan: VLAN used to enable VLAN promiscuous 37820deb0bf7SJacob Keller * 37830deb0bf7SJacob Keller * This function should only be called if VLAN promiscuous mode is allowed, 37840deb0bf7SJacob Keller * which can be determined via ice_is_vlan_promisc_allowed(). 
37850deb0bf7SJacob Keller */ 37860deb0bf7SJacob Keller static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) 37870deb0bf7SJacob Keller { 37880deb0bf7SJacob Keller u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; 37890deb0bf7SJacob Keller int status; 37900deb0bf7SJacob Keller 37910deb0bf7SJacob Keller status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 37920deb0bf7SJacob Keller vlan->vid); 37930deb0bf7SJacob Keller if (status && status != -EEXIST) 37940deb0bf7SJacob Keller return status; 37950deb0bf7SJacob Keller 37960deb0bf7SJacob Keller return 0; 37970deb0bf7SJacob Keller } 37980deb0bf7SJacob Keller 37990deb0bf7SJacob Keller /** 38000deb0bf7SJacob Keller * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN 38010deb0bf7SJacob Keller * @vsi: VF's VSI used to disable VLAN promiscuous mode for 38020deb0bf7SJacob Keller * @vlan: VLAN used to disable VLAN promiscuous 38030deb0bf7SJacob Keller * 38040deb0bf7SJacob Keller * This function should only be called if VLAN promiscuous mode is allowed, 38050deb0bf7SJacob Keller * which can be determined via ice_is_vlan_promisc_allowed(). 
38060deb0bf7SJacob Keller */ 38070deb0bf7SJacob Keller static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) 38080deb0bf7SJacob Keller { 38090deb0bf7SJacob Keller u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; 38100deb0bf7SJacob Keller int status; 38110deb0bf7SJacob Keller 38120deb0bf7SJacob Keller status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 38130deb0bf7SJacob Keller vlan->vid); 38140deb0bf7SJacob Keller if (status && status != -ENOENT) 38150deb0bf7SJacob Keller return status; 38160deb0bf7SJacob Keller 38170deb0bf7SJacob Keller return 0; 38180deb0bf7SJacob Keller } 38190deb0bf7SJacob Keller 38200deb0bf7SJacob Keller /** 38210deb0bf7SJacob Keller * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters 38220deb0bf7SJacob Keller * @vf: VF to check against 38230deb0bf7SJacob Keller * @vsi: VF's VSI 38240deb0bf7SJacob Keller * 38250deb0bf7SJacob Keller * If the VF is trusted then the VF is allowed to add as many VLANs as it 38260deb0bf7SJacob Keller * wants to, so return false. 38270deb0bf7SJacob Keller * 38280deb0bf7SJacob Keller * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max 38290deb0bf7SJacob Keller * allowed VLANs for an untrusted VF. Return the result of this comparison. 
38300deb0bf7SJacob Keller */ 38310deb0bf7SJacob Keller static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi) 38320deb0bf7SJacob Keller { 38330deb0bf7SJacob Keller if (ice_is_vf_trusted(vf)) 38340deb0bf7SJacob Keller return false; 38350deb0bf7SJacob Keller 38360deb0bf7SJacob Keller #define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1 38370deb0bf7SJacob Keller return ((ice_vsi_num_non_zero_vlans(vsi) + 38380deb0bf7SJacob Keller ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF); 38390deb0bf7SJacob Keller } 38400deb0bf7SJacob Keller 38410deb0bf7SJacob Keller /** 38420deb0bf7SJacob Keller * ice_vc_process_vlan_msg 38430deb0bf7SJacob Keller * @vf: pointer to the VF info 38440deb0bf7SJacob Keller * @msg: pointer to the msg buffer 38450deb0bf7SJacob Keller * @add_v: Add VLAN if true, otherwise delete VLAN 38460deb0bf7SJacob Keller * 38470deb0bf7SJacob Keller * Process virtchnl op to add or remove programmed guest VLAN ID 38480deb0bf7SJacob Keller */ 38490deb0bf7SJacob Keller static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) 38500deb0bf7SJacob Keller { 38510deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 38520deb0bf7SJacob Keller struct virtchnl_vlan_filter_list *vfl = 38530deb0bf7SJacob Keller (struct virtchnl_vlan_filter_list *)msg; 38540deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 38550deb0bf7SJacob Keller bool vlan_promisc = false; 38560deb0bf7SJacob Keller struct ice_vsi *vsi; 38570deb0bf7SJacob Keller struct device *dev; 38580deb0bf7SJacob Keller int status = 0; 38590deb0bf7SJacob Keller int i; 38600deb0bf7SJacob Keller 38610deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 38620deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 38630deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38640deb0bf7SJacob Keller goto error_param; 38650deb0bf7SJacob Keller } 38660deb0bf7SJacob Keller 38670deb0bf7SJacob Keller if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 38680deb0bf7SJacob 
Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38690deb0bf7SJacob Keller goto error_param; 38700deb0bf7SJacob Keller } 38710deb0bf7SJacob Keller 38720deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 38730deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38740deb0bf7SJacob Keller goto error_param; 38750deb0bf7SJacob Keller } 38760deb0bf7SJacob Keller 38770deb0bf7SJacob Keller for (i = 0; i < vfl->num_elements; i++) { 38780deb0bf7SJacob Keller if (vfl->vlan_id[i] >= VLAN_N_VID) { 38790deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38800deb0bf7SJacob Keller dev_err(dev, "invalid VF VLAN id %d\n", 38810deb0bf7SJacob Keller vfl->vlan_id[i]); 38820deb0bf7SJacob Keller goto error_param; 38830deb0bf7SJacob Keller } 38840deb0bf7SJacob Keller } 38850deb0bf7SJacob Keller 38860deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 38870deb0bf7SJacob Keller if (!vsi) { 38880deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38890deb0bf7SJacob Keller goto error_param; 38900deb0bf7SJacob Keller } 38910deb0bf7SJacob Keller 38920deb0bf7SJacob Keller if (add_v && ice_vf_has_max_vlans(vf, vsi)) { 38930deb0bf7SJacob Keller dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", 38940deb0bf7SJacob Keller vf->vf_id); 38950deb0bf7SJacob Keller /* There is no need to let VF know about being not trusted, 38960deb0bf7SJacob Keller * so we can just return success message here 38970deb0bf7SJacob Keller */ 38980deb0bf7SJacob Keller goto error_param; 38990deb0bf7SJacob Keller } 39000deb0bf7SJacob Keller 39010deb0bf7SJacob Keller /* in DVM a VF can add/delete inner VLAN filters when 39020deb0bf7SJacob Keller * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM 39030deb0bf7SJacob Keller */ 39040deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) { 39050deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 39060deb0bf7SJacob Keller goto error_param; 39070deb0bf7SJacob Keller } 
39080deb0bf7SJacob Keller 39090deb0bf7SJacob Keller /* in DVM VLAN promiscuous is based on the outer VLAN, which would be 39100deb0bf7SJacob Keller * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only 39110deb0bf7SJacob Keller * allow vlan_promisc = true in SVM and if no port VLAN is configured 39120deb0bf7SJacob Keller */ 39130deb0bf7SJacob Keller vlan_promisc = ice_is_vlan_promisc_allowed(vf) && 39140deb0bf7SJacob Keller !ice_is_dvm_ena(&pf->hw) && 39150deb0bf7SJacob Keller !ice_vf_is_port_vlan_ena(vf); 39160deb0bf7SJacob Keller 39170deb0bf7SJacob Keller if (add_v) { 39180deb0bf7SJacob Keller for (i = 0; i < vfl->num_elements; i++) { 39190deb0bf7SJacob Keller u16 vid = vfl->vlan_id[i]; 39200deb0bf7SJacob Keller struct ice_vlan vlan; 39210deb0bf7SJacob Keller 39220deb0bf7SJacob Keller if (ice_vf_has_max_vlans(vf, vsi)) { 39230deb0bf7SJacob Keller dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", 39240deb0bf7SJacob Keller vf->vf_id); 39250deb0bf7SJacob Keller /* There is no need to let VF know about being 39260deb0bf7SJacob Keller * not trusted, so we can just return success 39270deb0bf7SJacob Keller * message here as well. 
39280deb0bf7SJacob Keller */ 39290deb0bf7SJacob Keller goto error_param; 39300deb0bf7SJacob Keller } 39310deb0bf7SJacob Keller 39320deb0bf7SJacob Keller /* we add VLAN 0 by default for each VF so we can enable 39330deb0bf7SJacob Keller * Tx VLAN anti-spoof without triggering MDD events so 39340deb0bf7SJacob Keller * we don't need to add it again here 39350deb0bf7SJacob Keller */ 39360deb0bf7SJacob Keller if (!vid) 39370deb0bf7SJacob Keller continue; 39380deb0bf7SJacob Keller 39390deb0bf7SJacob Keller vlan = ICE_VLAN(ETH_P_8021Q, vid, 0); 39400deb0bf7SJacob Keller status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan); 39410deb0bf7SJacob Keller if (status) { 39420deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 39430deb0bf7SJacob Keller goto error_param; 39440deb0bf7SJacob Keller } 39450deb0bf7SJacob Keller 39460deb0bf7SJacob Keller /* Enable VLAN filtering on first non-zero VLAN */ 39470deb0bf7SJacob Keller if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) { 39480deb0bf7SJacob Keller if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) { 39490deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 39500deb0bf7SJacob Keller dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", 39510deb0bf7SJacob Keller vid, status); 39520deb0bf7SJacob Keller goto error_param; 39530deb0bf7SJacob Keller } 39540deb0bf7SJacob Keller } else if (vlan_promisc) { 39550deb0bf7SJacob Keller status = ice_vf_ena_vlan_promisc(vsi, &vlan); 39560deb0bf7SJacob Keller if (status) { 39570deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 39580deb0bf7SJacob Keller dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", 39590deb0bf7SJacob Keller vid, status); 39600deb0bf7SJacob Keller } 39610deb0bf7SJacob Keller } 39620deb0bf7SJacob Keller } 39630deb0bf7SJacob Keller } else { 39640deb0bf7SJacob Keller /* In case of non_trusted VF, number of VLAN elements passed 39650deb0bf7SJacob Keller * to PF for removal might be greater than number of VLANs 
39660deb0bf7SJacob Keller * filter programmed for that VF - So, use actual number of 39670deb0bf7SJacob Keller * VLANS added earlier with add VLAN opcode. In order to avoid 39680deb0bf7SJacob Keller * removing VLAN that doesn't exist, which result to sending 39690deb0bf7SJacob Keller * erroneous failed message back to the VF 39700deb0bf7SJacob Keller */ 39710deb0bf7SJacob Keller int num_vf_vlan; 39720deb0bf7SJacob Keller 39730deb0bf7SJacob Keller num_vf_vlan = vsi->num_vlan; 39740deb0bf7SJacob Keller for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) { 39750deb0bf7SJacob Keller u16 vid = vfl->vlan_id[i]; 39760deb0bf7SJacob Keller struct ice_vlan vlan; 39770deb0bf7SJacob Keller 39780deb0bf7SJacob Keller /* we add VLAN 0 by default for each VF so we can enable 39790deb0bf7SJacob Keller * Tx VLAN anti-spoof without triggering MDD events so 39800deb0bf7SJacob Keller * we don't want a VIRTCHNL request to remove it 39810deb0bf7SJacob Keller */ 39820deb0bf7SJacob Keller if (!vid) 39830deb0bf7SJacob Keller continue; 39840deb0bf7SJacob Keller 39850deb0bf7SJacob Keller vlan = ICE_VLAN(ETH_P_8021Q, vid, 0); 39860deb0bf7SJacob Keller status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan); 39870deb0bf7SJacob Keller if (status) { 39880deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 39890deb0bf7SJacob Keller goto error_param; 39900deb0bf7SJacob Keller } 39910deb0bf7SJacob Keller 39920deb0bf7SJacob Keller /* Disable VLAN filtering when only VLAN 0 is left */ 39930deb0bf7SJacob Keller if (!ice_vsi_has_non_zero_vlans(vsi)) 39940deb0bf7SJacob Keller vsi->inner_vlan_ops.dis_rx_filtering(vsi); 39950deb0bf7SJacob Keller 39960deb0bf7SJacob Keller if (vlan_promisc) 39970deb0bf7SJacob Keller ice_vf_dis_vlan_promisc(vsi, &vlan); 39980deb0bf7SJacob Keller } 39990deb0bf7SJacob Keller } 40000deb0bf7SJacob Keller 40010deb0bf7SJacob Keller error_param: 40020deb0bf7SJacob Keller /* send the response to the VF */ 40030deb0bf7SJacob Keller if (add_v) 40040deb0bf7SJacob Keller return 
ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret, 40050deb0bf7SJacob Keller NULL, 0); 40060deb0bf7SJacob Keller else 40070deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret, 40080deb0bf7SJacob Keller NULL, 0); 40090deb0bf7SJacob Keller } 40100deb0bf7SJacob Keller 40110deb0bf7SJacob Keller /** 40120deb0bf7SJacob Keller * ice_vc_add_vlan_msg 40130deb0bf7SJacob Keller * @vf: pointer to the VF info 40140deb0bf7SJacob Keller * @msg: pointer to the msg buffer 40150deb0bf7SJacob Keller * 40160deb0bf7SJacob Keller * Add and program guest VLAN ID 40170deb0bf7SJacob Keller */ 40180deb0bf7SJacob Keller static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) 40190deb0bf7SJacob Keller { 40200deb0bf7SJacob Keller return ice_vc_process_vlan_msg(vf, msg, true); 40210deb0bf7SJacob Keller } 40220deb0bf7SJacob Keller 40230deb0bf7SJacob Keller /** 40240deb0bf7SJacob Keller * ice_vc_remove_vlan_msg 40250deb0bf7SJacob Keller * @vf: pointer to the VF info 40260deb0bf7SJacob Keller * @msg: pointer to the msg buffer 40270deb0bf7SJacob Keller * 40280deb0bf7SJacob Keller * remove programmed guest VLAN ID 40290deb0bf7SJacob Keller */ 40300deb0bf7SJacob Keller static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) 40310deb0bf7SJacob Keller { 40320deb0bf7SJacob Keller return ice_vc_process_vlan_msg(vf, msg, false); 40330deb0bf7SJacob Keller } 40340deb0bf7SJacob Keller 40350deb0bf7SJacob Keller /** 40360deb0bf7SJacob Keller * ice_vc_ena_vlan_stripping 40370deb0bf7SJacob Keller * @vf: pointer to the VF info 40380deb0bf7SJacob Keller * 40390deb0bf7SJacob Keller * Enable VLAN header stripping for a given VF 40400deb0bf7SJacob Keller */ 40410deb0bf7SJacob Keller static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) 40420deb0bf7SJacob Keller { 40430deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 40440deb0bf7SJacob Keller struct ice_vsi *vsi; 40450deb0bf7SJacob Keller 40460deb0bf7SJacob Keller if 
(!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 40470deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40480deb0bf7SJacob Keller goto error_param; 40490deb0bf7SJacob Keller } 40500deb0bf7SJacob Keller 40510deb0bf7SJacob Keller if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 40520deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40530deb0bf7SJacob Keller goto error_param; 40540deb0bf7SJacob Keller } 40550deb0bf7SJacob Keller 40560deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 40570deb0bf7SJacob Keller if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) 40580deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40590deb0bf7SJacob Keller 40600deb0bf7SJacob Keller error_param: 40610deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 40620deb0bf7SJacob Keller v_ret, NULL, 0); 40630deb0bf7SJacob Keller } 40640deb0bf7SJacob Keller 40650deb0bf7SJacob Keller /** 40660deb0bf7SJacob Keller * ice_vc_dis_vlan_stripping 40670deb0bf7SJacob Keller * @vf: pointer to the VF info 40680deb0bf7SJacob Keller * 40690deb0bf7SJacob Keller * Disable VLAN header stripping for a given VF 40700deb0bf7SJacob Keller */ 40710deb0bf7SJacob Keller static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) 40720deb0bf7SJacob Keller { 40730deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 40740deb0bf7SJacob Keller struct ice_vsi *vsi; 40750deb0bf7SJacob Keller 40760deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 40770deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40780deb0bf7SJacob Keller goto error_param; 40790deb0bf7SJacob Keller } 40800deb0bf7SJacob Keller 40810deb0bf7SJacob Keller if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 40820deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40830deb0bf7SJacob Keller goto error_param; 40840deb0bf7SJacob Keller } 40850deb0bf7SJacob Keller 40860deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 40870deb0bf7SJacob Keller if (!vsi) { 
40880deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40890deb0bf7SJacob Keller goto error_param; 40900deb0bf7SJacob Keller } 40910deb0bf7SJacob Keller 40920deb0bf7SJacob Keller if (vsi->inner_vlan_ops.dis_stripping(vsi)) 40930deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 40940deb0bf7SJacob Keller 40950deb0bf7SJacob Keller error_param: 40960deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 40970deb0bf7SJacob Keller v_ret, NULL, 0); 40980deb0bf7SJacob Keller } 40990deb0bf7SJacob Keller 41000deb0bf7SJacob Keller /** 41010deb0bf7SJacob Keller * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization 41020deb0bf7SJacob Keller * @vf: VF to enable/disable VLAN stripping for on initialization 41030deb0bf7SJacob Keller * 41040deb0bf7SJacob Keller * Set the default for VLAN stripping based on whether a port VLAN is configured 41050deb0bf7SJacob Keller * and the current VLAN mode of the device. 41060deb0bf7SJacob Keller */ 41070deb0bf7SJacob Keller static int ice_vf_init_vlan_stripping(struct ice_vf *vf) 41080deb0bf7SJacob Keller { 41090deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 41100deb0bf7SJacob Keller 41110deb0bf7SJacob Keller if (!vsi) 41120deb0bf7SJacob Keller return -EINVAL; 41130deb0bf7SJacob Keller 41140deb0bf7SJacob Keller /* don't modify stripping if port VLAN is configured in SVM since the 41150deb0bf7SJacob Keller * port VLAN is based on the inner/single VLAN in SVM 41160deb0bf7SJacob Keller */ 41170deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw)) 41180deb0bf7SJacob Keller return 0; 41190deb0bf7SJacob Keller 41200deb0bf7SJacob Keller if (ice_vf_vlan_offload_ena(vf->driver_caps)) 41210deb0bf7SJacob Keller return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); 41220deb0bf7SJacob Keller else 41230deb0bf7SJacob Keller return vsi->inner_vlan_ops.dis_stripping(vsi); 41240deb0bf7SJacob Keller } 41250deb0bf7SJacob Keller 
41260deb0bf7SJacob Keller static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) 41270deb0bf7SJacob Keller { 41280deb0bf7SJacob Keller if (vf->trusted) 41290deb0bf7SJacob Keller return VLAN_N_VID; 41300deb0bf7SJacob Keller else 41310deb0bf7SJacob Keller return ICE_MAX_VLAN_PER_VF; 41320deb0bf7SJacob Keller } 41330deb0bf7SJacob Keller 41340deb0bf7SJacob Keller /** 41350deb0bf7SJacob Keller * ice_vf_outer_vlan_not_allowed - check outer VLAN can be used when the device is in DVM 41360deb0bf7SJacob Keller * @vf: VF that being checked for 41370deb0bf7SJacob Keller */ 41380deb0bf7SJacob Keller static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf) 41390deb0bf7SJacob Keller { 41400deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 41410deb0bf7SJacob Keller return true; 41420deb0bf7SJacob Keller 41430deb0bf7SJacob Keller return false; 41440deb0bf7SJacob Keller } 41450deb0bf7SJacob Keller 41460deb0bf7SJacob Keller /** 41470deb0bf7SJacob Keller * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM 41480deb0bf7SJacob Keller * @vf: VF that capabilities are being set for 41490deb0bf7SJacob Keller * @caps: VLAN capabilities to populate 41500deb0bf7SJacob Keller * 41510deb0bf7SJacob Keller * Determine VLAN capabilities support based on whether a port VLAN is 41520deb0bf7SJacob Keller * configured. If a port VLAN is configured then the VF should use the inner 41530deb0bf7SJacob Keller * filtering/offload capabilities since the port VLAN is using the outer VLAN 41540deb0bf7SJacob Keller * capabilies. 
41550deb0bf7SJacob Keller */ 41560deb0bf7SJacob Keller static void 41570deb0bf7SJacob Keller ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps) 41580deb0bf7SJacob Keller { 41590deb0bf7SJacob Keller struct virtchnl_vlan_supported_caps *supported_caps; 41600deb0bf7SJacob Keller 41610deb0bf7SJacob Keller if (ice_vf_outer_vlan_not_allowed(vf)) { 41620deb0bf7SJacob Keller /* until support for inner VLAN filtering is added when a port 41630deb0bf7SJacob Keller * VLAN is configured, only support software offloaded inner 41640deb0bf7SJacob Keller * VLANs when a port VLAN is confgured in DVM 41650deb0bf7SJacob Keller */ 41660deb0bf7SJacob Keller supported_caps = &caps->filtering.filtering_support; 41670deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 41680deb0bf7SJacob Keller 41690deb0bf7SJacob Keller supported_caps = &caps->offloads.stripping_support; 41700deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 41710deb0bf7SJacob Keller VIRTCHNL_VLAN_TOGGLE | 41720deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 41730deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 41740deb0bf7SJacob Keller 41750deb0bf7SJacob Keller supported_caps = &caps->offloads.insertion_support; 41760deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 41770deb0bf7SJacob Keller VIRTCHNL_VLAN_TOGGLE | 41780deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 41790deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 41800deb0bf7SJacob Keller 41810deb0bf7SJacob Keller caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 41820deb0bf7SJacob Keller caps->offloads.ethertype_match = 41830deb0bf7SJacob Keller VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION; 41840deb0bf7SJacob Keller } else { 41850deb0bf7SJacob Keller supported_caps = &caps->filtering.filtering_support; 41860deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 41870deb0bf7SJacob 
Keller supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 | 41880deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_88A8 | 41890deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_9100 | 41900deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_AND; 41910deb0bf7SJacob Keller caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 | 41920deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_88A8 | 41930deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_9100; 41940deb0bf7SJacob Keller 41950deb0bf7SJacob Keller supported_caps = &caps->offloads.stripping_support; 41960deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_TOGGLE | 41970deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_8100 | 41980deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 41990deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_TOGGLE | 42000deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_8100 | 42010deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_88A8 | 42020deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_9100 | 42030deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_XOR | 42040deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2; 42050deb0bf7SJacob Keller 42060deb0bf7SJacob Keller supported_caps = &caps->offloads.insertion_support; 42070deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_TOGGLE | 42080deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_8100 | 42090deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 42100deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_TOGGLE | 42110deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_8100 | 42120deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_88A8 | 42130deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_9100 | 42140deb0bf7SJacob Keller VIRTCHNL_VLAN_ETHERTYPE_XOR | 42150deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2; 42160deb0bf7SJacob Keller 42170deb0bf7SJacob Keller caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 42180deb0bf7SJacob Keller 42190deb0bf7SJacob Keller caps->offloads.ethertype_match = 42200deb0bf7SJacob Keller 
VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION; 42210deb0bf7SJacob Keller } 42220deb0bf7SJacob Keller 42230deb0bf7SJacob Keller caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf); 42240deb0bf7SJacob Keller } 42250deb0bf7SJacob Keller 42260deb0bf7SJacob Keller /** 42270deb0bf7SJacob Keller * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM 42280deb0bf7SJacob Keller * @vf: VF that capabilities are being set for 42290deb0bf7SJacob Keller * @caps: VLAN capabilities to populate 42300deb0bf7SJacob Keller * 42310deb0bf7SJacob Keller * Determine VLAN capabilities support based on whether a port VLAN is 42320deb0bf7SJacob Keller * configured. If a port VLAN is configured then the VF does not have any VLAN 42330deb0bf7SJacob Keller * filtering or offload capabilities since the port VLAN is using the inner VLAN 42340deb0bf7SJacob Keller * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner 42350deb0bf7SJacob Keller * VLAN fitlering and offload capabilities. 
42360deb0bf7SJacob Keller */ 42370deb0bf7SJacob Keller static void 42380deb0bf7SJacob Keller ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps) 42390deb0bf7SJacob Keller { 42400deb0bf7SJacob Keller struct virtchnl_vlan_supported_caps *supported_caps; 42410deb0bf7SJacob Keller 42420deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) { 42430deb0bf7SJacob Keller supported_caps = &caps->filtering.filtering_support; 42440deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 42450deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 42460deb0bf7SJacob Keller 42470deb0bf7SJacob Keller supported_caps = &caps->offloads.stripping_support; 42480deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 42490deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 42500deb0bf7SJacob Keller 42510deb0bf7SJacob Keller supported_caps = &caps->offloads.insertion_support; 42520deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 42530deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 42540deb0bf7SJacob Keller 42550deb0bf7SJacob Keller caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED; 42560deb0bf7SJacob Keller caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED; 42570deb0bf7SJacob Keller caps->filtering.max_filters = 0; 42580deb0bf7SJacob Keller } else { 42590deb0bf7SJacob Keller supported_caps = &caps->filtering.filtering_support; 42600deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100; 42610deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 42620deb0bf7SJacob Keller caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 42630deb0bf7SJacob Keller 42640deb0bf7SJacob Keller supported_caps = &caps->offloads.stripping_support; 42650deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 42660deb0bf7SJacob Keller VIRTCHNL_VLAN_TOGGLE | 42670deb0bf7SJacob Keller 
VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 42680deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 42690deb0bf7SJacob Keller 42700deb0bf7SJacob Keller supported_caps = &caps->offloads.insertion_support; 42710deb0bf7SJacob Keller supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 42720deb0bf7SJacob Keller VIRTCHNL_VLAN_TOGGLE | 42730deb0bf7SJacob Keller VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 42740deb0bf7SJacob Keller supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 42750deb0bf7SJacob Keller 42760deb0bf7SJacob Keller caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 42770deb0bf7SJacob Keller caps->offloads.ethertype_match = 42780deb0bf7SJacob Keller VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION; 42790deb0bf7SJacob Keller caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf); 42800deb0bf7SJacob Keller } 42810deb0bf7SJacob Keller } 42820deb0bf7SJacob Keller 42830deb0bf7SJacob Keller /** 42840deb0bf7SJacob Keller * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities 42850deb0bf7SJacob Keller * @vf: VF to determine VLAN capabilities for 42860deb0bf7SJacob Keller * 42870deb0bf7SJacob Keller * This will only be called if the VF and PF successfully negotiated 42880deb0bf7SJacob Keller * VIRTCHNL_VF_OFFLOAD_VLAN_V2. 42890deb0bf7SJacob Keller * 42900deb0bf7SJacob Keller * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN 42910deb0bf7SJacob Keller * is configured or not. 
42920deb0bf7SJacob Keller */ 42930deb0bf7SJacob Keller static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf) 42940deb0bf7SJacob Keller { 42950deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 42960deb0bf7SJacob Keller struct virtchnl_vlan_caps *caps = NULL; 42970deb0bf7SJacob Keller int err, len = 0; 42980deb0bf7SJacob Keller 42990deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 43000deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 43010deb0bf7SJacob Keller goto out; 43020deb0bf7SJacob Keller } 43030deb0bf7SJacob Keller 43040deb0bf7SJacob Keller caps = kzalloc(sizeof(*caps), GFP_KERNEL); 43050deb0bf7SJacob Keller if (!caps) { 43060deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 43070deb0bf7SJacob Keller goto out; 43080deb0bf7SJacob Keller } 43090deb0bf7SJacob Keller len = sizeof(*caps); 43100deb0bf7SJacob Keller 43110deb0bf7SJacob Keller if (ice_is_dvm_ena(&vf->pf->hw)) 43120deb0bf7SJacob Keller ice_vc_set_dvm_caps(vf, caps); 43130deb0bf7SJacob Keller else 43140deb0bf7SJacob Keller ice_vc_set_svm_caps(vf, caps); 43150deb0bf7SJacob Keller 43160deb0bf7SJacob Keller /* store negotiated caps to prevent invalid VF messages */ 43170deb0bf7SJacob Keller memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps)); 43180deb0bf7SJacob Keller 43190deb0bf7SJacob Keller out: 43200deb0bf7SJacob Keller err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 43210deb0bf7SJacob Keller v_ret, (u8 *)caps, len); 43220deb0bf7SJacob Keller kfree(caps); 43230deb0bf7SJacob Keller return err; 43240deb0bf7SJacob Keller } 43250deb0bf7SJacob Keller 43260deb0bf7SJacob Keller /** 43270deb0bf7SJacob Keller * ice_vc_validate_vlan_tpid - validate VLAN TPID 43280deb0bf7SJacob Keller * @filtering_caps: negotiated/supported VLAN filtering capabilities 43290deb0bf7SJacob Keller * @tpid: VLAN TPID used for validation 43300deb0bf7SJacob Keller * 43310deb0bf7SJacob Keller * Convert the VLAN TPID to a 
VIRTCHNL_VLAN_ETHERTYPE_* and then compare against 43320deb0bf7SJacob Keller * the negotiated/supported filtering caps to see if the VLAN TPID is valid. 43330deb0bf7SJacob Keller */ 43340deb0bf7SJacob Keller static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid) 43350deb0bf7SJacob Keller { 43360deb0bf7SJacob Keller enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED; 43370deb0bf7SJacob Keller 43380deb0bf7SJacob Keller switch (tpid) { 43390deb0bf7SJacob Keller case ETH_P_8021Q: 43400deb0bf7SJacob Keller vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100; 43410deb0bf7SJacob Keller break; 43420deb0bf7SJacob Keller case ETH_P_8021AD: 43430deb0bf7SJacob Keller vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8; 43440deb0bf7SJacob Keller break; 43450deb0bf7SJacob Keller case ETH_P_QINQ1: 43460deb0bf7SJacob Keller vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100; 43470deb0bf7SJacob Keller break; 43480deb0bf7SJacob Keller } 43490deb0bf7SJacob Keller 43500deb0bf7SJacob Keller if (!(filtering_caps & vlan_ethertype)) 43510deb0bf7SJacob Keller return false; 43520deb0bf7SJacob Keller 43530deb0bf7SJacob Keller return true; 43540deb0bf7SJacob Keller } 43550deb0bf7SJacob Keller 43560deb0bf7SJacob Keller /** 43570deb0bf7SJacob Keller * ice_vc_is_valid_vlan - validate the virtchnl_vlan 43580deb0bf7SJacob Keller * @vc_vlan: virtchnl_vlan to validate 43590deb0bf7SJacob Keller * 43600deb0bf7SJacob Keller * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return 43610deb0bf7SJacob Keller * false. Otherwise return true. 
 */
static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
{
	/* NOTE: either field being zero marks the filter slot as unused */
	if (!vc_vlan->tci || !vc_vlan->tpid)
		return false;

	return true;
}

/**
 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
 * @vfc: negotiated/supported VLAN filtering capabilities
 * @vfl: VLAN filter list from VF to validate
 *
 * Validate all of the filters in the VLAN filter list from the VF. If any of
 * the checks fail then return false. Otherwise return true.
 */
static bool
ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
				 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	u16 i;

	/* an empty list is never a valid request */
	if (!vfl->num_elements)
		return false;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vfc->filtering_support;
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *outer = &vlan_fltr->outer;
		struct virtchnl_vlan *inner = &vlan_fltr->inner;

		/* a populated outer/inner VLAN is only allowed if filtering on
		 * that tag location was negotiated at all
		 */
		if ((ice_vc_is_valid_vlan(outer) &&
		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
			return false;

		/* masked filtering requires the FILTER_MASK capability */
		if ((outer->tci_mask &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
		    (inner->tci_mask &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
			return false;

		/* priority bits in the TCI require the PRIO capability */
		if (((outer->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
		    ((inner->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
			return false;

		/* the requested TPID must match a negotiated ethertype */
		if ((ice_vc_is_valid_vlan(outer) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->outer, outer->tpid)) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->inner, inner->tpid)))
			return false;
	}

	return true;
}

/**
 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
 * @vc_vlan: struct virtchnl_vlan to transform
 *
 * Returns: an ice_vlan populated with the priority, VID, and TPID unpacked
 * from the virtchnl representation.
 */
static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
{
	struct ice_vlan vlan = { 0 };

	vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
	vlan.tpid = vc_vlan->tpid;

	return vlan;
}

/**
 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
 * @vsi: VF's VSI used to perform the action
 * @vlan_action: function to perform the action with (i.e. add/del)
 * @vlan: VLAN filter to perform the action with
 *
 * Returns: 0 on success, the callback's error code on failure.
 */
static int
ice_vc_vlan_action(struct ice_vsi *vsi,
		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
		   struct ice_vlan *vlan)
{
	int err;

	err = vlan_action(vsi, vlan);
	if (err)
		return err;

	return 0;
}

/**
 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
 * @vf: VF used to delete the VLAN(s)
 * @vsi: VF's VSI used to delete the VLAN(s)
 * @vfl: virtchnl filter list used to delete the filters
 *
 * Returns: 0 on success, error code of the first failing delete otherwise.
 */
static int
ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.del_vlan,
						 &vlan);
			if (err)
				return err;

			/* promiscuous rules for this VLAN are torn down along
			 * with the filter when promisc is allowed
			 */
			if (vlan_promisc)
				ice_vf_dis_vlan_promisc(vsi, &vlan);
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.del_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
				ice_vf_dis_vlan_promisc(vsi, &vlan);
		}
	}

	return 0;
}

/**
 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * Validates the filter list against the negotiated caps and the vport, then
 * deletes the VLANs. Always replies to the VF with the resulting status.
 */
static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list_v2 *vfl =
		(struct virtchnl_vlan_filter_list_v2 *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
					      vfl)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (ice_vc_del_vlans(vf, vsi, vfl))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
				     0);
}

/**
 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
 * @vf: VF used to add the VLAN(s)
 * @vsi: VF's VSI used to add the VLAN(s)
 * @vfl: virtchnl filter list used to add the filters
 *
 * Returns: 0 on success, error code of the first failing add otherwise.
 */
static int
ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			/* mirror the filter into the promiscuous rules when
			 * promisc is allowed for this VF
			 */
			if (vlan_promisc) {
				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (err)
					return err;
			}
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

/**
 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
 * @vsi: VF VSI used to get number of existing VLAN filters
 * @vfc: negotiated/supported VLAN filtering capabilities
 * @vfl: VLAN filter list from VF to validate
 *
 * Validate all of the filters in the VLAN filter list from the VF during the
 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
 * Otherwise return true.
 */
static bool
ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
				     struct virtchnl_vlan_filtering_caps *vfc,
				     struct virtchnl_vlan_filter_list_v2 *vfl)
{
	/* reject if existing + requested filters exceed the negotiated max */
	u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;

	if (num_requested_filters > vfc->max_filters)
		return false;

	return ice_vc_validate_vlan_filter_list(vfc, vfl);
}

/**
 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * Validates VF state, vport, and the filter list (including the max-filter
 * budget) before adding the VLANs. Always replies to the VF with the status.
 */
static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list_v2 *vfl =
		(struct virtchnl_vlan_filter_list_v2 *)msg;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_validate_add_vlan_filter_list(vsi,
						  &vf->vlan_v2_caps.filtering,
						  vfl)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (ice_vc_add_vlans(vf, vsi, vfl))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
				     0);
}

/**
 * ice_vc_valid_vlan_setting - validate VLAN setting
 * @negotiated_settings: negotiated VLAN settings during VF init
 * @ethertype_setting: ethertype(s) requested for the VLAN setting
 */
static bool
ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
{
	/* every requested ethertype bit must have been negotiated */
	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
		return false;

	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
	 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
	 */
	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
	    hweight32(ethertype_setting) > 1)
		return false;

	/* ability to modify the VLAN setting was not negotiated */
	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
		return false;

	return true;
}

/**
 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
 * @caps: negotiated VLAN settings during VF init
 * @msg: message to validate
 *
 * Used to validate any VLAN virtchnl message sent as a
 * virtchnl_vlan_setting structure. Validates the message against the
 * negotiated/supported caps during VF driver init.
47060deb0bf7SJacob Keller */ 47070deb0bf7SJacob Keller static bool 47080deb0bf7SJacob Keller ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps, 47090deb0bf7SJacob Keller struct virtchnl_vlan_setting *msg) 47100deb0bf7SJacob Keller { 47110deb0bf7SJacob Keller if ((!msg->outer_ethertype_setting && 47120deb0bf7SJacob Keller !msg->inner_ethertype_setting) || 47130deb0bf7SJacob Keller (!caps->outer && !caps->inner)) 47140deb0bf7SJacob Keller return false; 47150deb0bf7SJacob Keller 47160deb0bf7SJacob Keller if (msg->outer_ethertype_setting && 47170deb0bf7SJacob Keller !ice_vc_valid_vlan_setting(caps->outer, 47180deb0bf7SJacob Keller msg->outer_ethertype_setting)) 47190deb0bf7SJacob Keller return false; 47200deb0bf7SJacob Keller 47210deb0bf7SJacob Keller if (msg->inner_ethertype_setting && 47220deb0bf7SJacob Keller !ice_vc_valid_vlan_setting(caps->inner, 47230deb0bf7SJacob Keller msg->inner_ethertype_setting)) 47240deb0bf7SJacob Keller return false; 47250deb0bf7SJacob Keller 47260deb0bf7SJacob Keller return true; 47270deb0bf7SJacob Keller } 47280deb0bf7SJacob Keller 47290deb0bf7SJacob Keller /** 47300deb0bf7SJacob Keller * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID 47310deb0bf7SJacob Keller * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID 47320deb0bf7SJacob Keller * @tpid: VLAN TPID to populate 47330deb0bf7SJacob Keller */ 47340deb0bf7SJacob Keller static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid) 47350deb0bf7SJacob Keller { 47360deb0bf7SJacob Keller switch (ethertype_setting) { 47370deb0bf7SJacob Keller case VIRTCHNL_VLAN_ETHERTYPE_8100: 47380deb0bf7SJacob Keller *tpid = ETH_P_8021Q; 47390deb0bf7SJacob Keller break; 47400deb0bf7SJacob Keller case VIRTCHNL_VLAN_ETHERTYPE_88A8: 47410deb0bf7SJacob Keller *tpid = ETH_P_8021AD; 47420deb0bf7SJacob Keller break; 47430deb0bf7SJacob Keller case VIRTCHNL_VLAN_ETHERTYPE_9100: 47440deb0bf7SJacob Keller *tpid = ETH_P_QINQ1; 47450deb0bf7SJacob Keller 
break; 47460deb0bf7SJacob Keller default: 47470deb0bf7SJacob Keller *tpid = 0; 47480deb0bf7SJacob Keller return -EINVAL; 47490deb0bf7SJacob Keller } 47500deb0bf7SJacob Keller 47510deb0bf7SJacob Keller return 0; 47520deb0bf7SJacob Keller } 47530deb0bf7SJacob Keller 47540deb0bf7SJacob Keller /** 47550deb0bf7SJacob Keller * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting 47560deb0bf7SJacob Keller * @vsi: VF's VSI used to enable the VLAN offload 47570deb0bf7SJacob Keller * @ena_offload: function used to enable the VLAN offload 47580deb0bf7SJacob Keller * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for 47590deb0bf7SJacob Keller */ 47600deb0bf7SJacob Keller static int 47610deb0bf7SJacob Keller ice_vc_ena_vlan_offload(struct ice_vsi *vsi, 47620deb0bf7SJacob Keller int (*ena_offload)(struct ice_vsi *vsi, u16 tpid), 47630deb0bf7SJacob Keller u32 ethertype_setting) 47640deb0bf7SJacob Keller { 47650deb0bf7SJacob Keller u16 tpid; 47660deb0bf7SJacob Keller int err; 47670deb0bf7SJacob Keller 47680deb0bf7SJacob Keller err = ice_vc_get_tpid(ethertype_setting, &tpid); 47690deb0bf7SJacob Keller if (err) 47700deb0bf7SJacob Keller return err; 47710deb0bf7SJacob Keller 47720deb0bf7SJacob Keller err = ena_offload(vsi, tpid); 47730deb0bf7SJacob Keller if (err) 47740deb0bf7SJacob Keller return err; 47750deb0bf7SJacob Keller 47760deb0bf7SJacob Keller return 0; 47770deb0bf7SJacob Keller } 47780deb0bf7SJacob Keller 47790deb0bf7SJacob Keller #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3 47800deb0bf7SJacob Keller #define ICE_L2TSEL_BIT_OFFSET 23 47810deb0bf7SJacob Keller enum ice_l2tsel { 47820deb0bf7SJacob Keller ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND, 47830deb0bf7SJacob Keller ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1, 47840deb0bf7SJacob Keller }; 47850deb0bf7SJacob Keller 47860deb0bf7SJacob Keller /** 47870deb0bf7SJacob Keller * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI 47880deb0bf7SJacob Keller * @vsi: VSI used to 
update l2tsel on 47890deb0bf7SJacob Keller * @l2tsel: l2tsel setting requested 47900deb0bf7SJacob Keller * 47910deb0bf7SJacob Keller * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel. 47920deb0bf7SJacob Keller * This will modify which descriptor field the first offloaded VLAN will be 47930deb0bf7SJacob Keller * stripped into. 47940deb0bf7SJacob Keller */ 47950deb0bf7SJacob Keller static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel) 47960deb0bf7SJacob Keller { 47970deb0bf7SJacob Keller struct ice_hw *hw = &vsi->back->hw; 47980deb0bf7SJacob Keller u32 l2tsel_bit; 47990deb0bf7SJacob Keller int i; 48000deb0bf7SJacob Keller 48010deb0bf7SJacob Keller if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND) 48020deb0bf7SJacob Keller l2tsel_bit = 0; 48030deb0bf7SJacob Keller else 48040deb0bf7SJacob Keller l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET); 48050deb0bf7SJacob Keller 48060deb0bf7SJacob Keller for (i = 0; i < vsi->alloc_rxq; i++) { 48070deb0bf7SJacob Keller u16 pfq = vsi->rxq_map[i]; 48080deb0bf7SJacob Keller u32 qrx_context_offset; 48090deb0bf7SJacob Keller u32 regval; 48100deb0bf7SJacob Keller 48110deb0bf7SJacob Keller qrx_context_offset = 48120deb0bf7SJacob Keller QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq); 48130deb0bf7SJacob Keller 48140deb0bf7SJacob Keller regval = rd32(hw, qrx_context_offset); 48150deb0bf7SJacob Keller regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET); 48160deb0bf7SJacob Keller regval |= l2tsel_bit; 48170deb0bf7SJacob Keller wr32(hw, qrx_context_offset, regval); 48180deb0bf7SJacob Keller } 48190deb0bf7SJacob Keller } 48200deb0bf7SJacob Keller 48210deb0bf7SJacob Keller /** 48220deb0bf7SJacob Keller * ice_vc_ena_vlan_stripping_v2_msg 48230deb0bf7SJacob Keller * @vf: VF the message was received from 48240deb0bf7SJacob Keller * @msg: message received from the VF 48250deb0bf7SJacob Keller * 48260deb0bf7SJacob Keller * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 48270deb0bf7SJacob Keller */ 
static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *stripping_support;
	struct virtchnl_vlan_setting *strip_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	/* only honor the request while the VF is in the ACTIVE state */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* the request must fit within the negotiated stripping caps */
	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = strip_msg->outer_ethertype_setting;
	if (ethertype_setting) {
		if (ice_vc_ena_vlan_offload(vsi,
					    vsi->outer_vlan_ops.ena_stripping,
					    ethertype_setting)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto out;
		} else {
			enum ice_l2tsel l2tsel =
				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;

			/* PF tells the VF that the outer VLAN tag is always
			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
			 * inner is always extracted to
			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
			 * support outer stripping so the first tag always ends
			 * up in L2TAG2_2ND and the second/inner tag, if
			 * enabled, is extracted in L2TAG1.
			 */
			ice_vsi_update_l2tsel(vsi, l2tsel);
		}
	}

	ethertype_setting = strip_msg->inner_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	/* always reply to the VF with the resulting status */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_stripping_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
 */
static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *stripping_support;
	struct virtchnl_vlan_setting *strip_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = strip_msg->outer_ethertype_setting;
	if (ethertype_setting) {
		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto out;
		} else {
			enum ice_l2tsel l2tsel =
				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;

			/* PF tells the VF that the outer VLAN tag is always
			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
			 * inner is always extracted to
			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
			 * support inner stripping while outer stripping is
			 * disabled so that the first and only tag is extracted
			 * in L2TAG1.
			 */
			ice_vsi_update_l2tsel(vsi, l2tsel);
		}
	}

	ethertype_setting = strip_msg->inner_ethertype_setting;
	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
}

/**
 * ice_vc_ena_vlan_insertion_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
 */
static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *insertion_support;
	struct virtchnl_vlan_setting *insertion_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* the request must fit within the negotiated insertion caps */
	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->outer_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->inner_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_insertion_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
 */
static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *insertion_support;
	struct virtchnl_vlan_setting *insertion_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->outer_ethertype_setting;
	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->inner_ethertype_setting;
	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
}

/* default dispatch table mapping each virtchnl opcode to its handler */
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
	.get_ver_msg = ice_vc_get_ver_msg,
	.get_vf_res_msg = ice_vc_get_vf_res_msg,
	.reset_vf = ice_vc_reset_vf_msg,
	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
	.cfg_qs_msg = ice_vc_cfg_qs_msg,
	.ena_qs_msg = ice_vc_ena_qs_msg,
	.dis_qs_msg = ice_vc_dis_qs_msg,
	.request_qs_msg = ice_vc_request_qs_msg,
	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
	.config_rss_key = ice_vc_config_rss_key,
	.config_rss_lut = ice_vc_config_rss_lut,
	.get_stats_msg = ice_vc_get_stats_msg,
	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
	.add_vlan_msg = ice_vc_add_vlan_msg,
	.remove_vlan_msg = ice_vc_remove_vlan_msg,
	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
};

/**
 * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
 * @vf: the VF to switch ops
 */
void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
{
	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
}

/**
 * ice_vc_repr_add_mac
 * @vf: pointer to VF
 * @msg: virtchannel message
 *
 * When port representors are created, we do not add MAC rule
 * to firmware, we store it so that PF could report same
 * MAC as VF.
51230deb0bf7SJacob Keller */ 51240deb0bf7SJacob Keller static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) 51250deb0bf7SJacob Keller { 51260deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 51270deb0bf7SJacob Keller struct virtchnl_ether_addr_list *al = 51280deb0bf7SJacob Keller (struct virtchnl_ether_addr_list *)msg; 51290deb0bf7SJacob Keller struct ice_vsi *vsi; 51300deb0bf7SJacob Keller struct ice_pf *pf; 51310deb0bf7SJacob Keller int i; 51320deb0bf7SJacob Keller 51330deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 51340deb0bf7SJacob Keller !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { 51350deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 51360deb0bf7SJacob Keller goto handle_mac_exit; 51370deb0bf7SJacob Keller } 51380deb0bf7SJacob Keller 51390deb0bf7SJacob Keller pf = vf->pf; 51400deb0bf7SJacob Keller 51410deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 51420deb0bf7SJacob Keller if (!vsi) { 51430deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 51440deb0bf7SJacob Keller goto handle_mac_exit; 51450deb0bf7SJacob Keller } 51460deb0bf7SJacob Keller 51470deb0bf7SJacob Keller for (i = 0; i < al->num_elements; i++) { 51480deb0bf7SJacob Keller u8 *mac_addr = al->list[i].addr; 51490deb0bf7SJacob Keller int result; 51500deb0bf7SJacob Keller 51510deb0bf7SJacob Keller if (!is_unicast_ether_addr(mac_addr) || 51520deb0bf7SJacob Keller ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) 51530deb0bf7SJacob Keller continue; 51540deb0bf7SJacob Keller 51550deb0bf7SJacob Keller if (vf->pf_set_mac) { 51560deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); 51570deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 51580deb0bf7SJacob Keller goto handle_mac_exit; 51590deb0bf7SJacob Keller } 51600deb0bf7SJacob Keller 51610deb0bf7SJacob Keller result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr); 51620deb0bf7SJacob Keller if (result) { 
51630deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n", 51640deb0bf7SJacob Keller mac_addr, vf->vf_id, result); 51650deb0bf7SJacob Keller goto handle_mac_exit; 51660deb0bf7SJacob Keller } 51670deb0bf7SJacob Keller 51680deb0bf7SJacob Keller ice_vfhw_mac_add(vf, &al->list[i]); 51690deb0bf7SJacob Keller vf->num_mac++; 51700deb0bf7SJacob Keller break; 51710deb0bf7SJacob Keller } 51720deb0bf7SJacob Keller 51730deb0bf7SJacob Keller handle_mac_exit: 51740deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 51750deb0bf7SJacob Keller v_ret, NULL, 0); 51760deb0bf7SJacob Keller } 51770deb0bf7SJacob Keller 51780deb0bf7SJacob Keller /** 51790deb0bf7SJacob Keller * ice_vc_repr_del_mac - response with success for deleting MAC 51800deb0bf7SJacob Keller * @vf: pointer to VF 51810deb0bf7SJacob Keller * @msg: virtchannel message 51820deb0bf7SJacob Keller * 51830deb0bf7SJacob Keller * Respond with success to not break normal VF flow. 51840deb0bf7SJacob Keller * For legacy VF driver try to update cached MAC address. 
51850deb0bf7SJacob Keller */ 51860deb0bf7SJacob Keller static int 51870deb0bf7SJacob Keller ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg) 51880deb0bf7SJacob Keller { 51890deb0bf7SJacob Keller struct virtchnl_ether_addr_list *al = 51900deb0bf7SJacob Keller (struct virtchnl_ether_addr_list *)msg; 51910deb0bf7SJacob Keller 51920deb0bf7SJacob Keller ice_update_legacy_cached_mac(vf, &al->list[0]); 51930deb0bf7SJacob Keller 51940deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 51950deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, NULL, 0); 51960deb0bf7SJacob Keller } 51970deb0bf7SJacob Keller 51980deb0bf7SJacob Keller static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg) 51990deb0bf7SJacob Keller { 52000deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 52010deb0bf7SJacob Keller "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id); 52020deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, 52030deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, NULL, 0); 52040deb0bf7SJacob Keller } 52050deb0bf7SJacob Keller 52060deb0bf7SJacob Keller static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg) 52070deb0bf7SJacob Keller { 52080deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 52090deb0bf7SJacob Keller "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id); 52100deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, 52110deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, NULL, 0); 52120deb0bf7SJacob Keller } 52130deb0bf7SJacob Keller 52140deb0bf7SJacob Keller static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf) 52150deb0bf7SJacob Keller { 52160deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 52170deb0bf7SJacob Keller "Can't enable VLAN stripping in switchdev mode for VF %d\n", 52180deb0bf7SJacob Keller vf->vf_id); 52190deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 
52200deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 52210deb0bf7SJacob Keller NULL, 0); 52220deb0bf7SJacob Keller } 52230deb0bf7SJacob Keller 52240deb0bf7SJacob Keller static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf) 52250deb0bf7SJacob Keller { 52260deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 52270deb0bf7SJacob Keller "Can't disable VLAN stripping in switchdev mode for VF %d\n", 52280deb0bf7SJacob Keller vf->vf_id); 52290deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 52300deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 52310deb0bf7SJacob Keller NULL, 0); 52320deb0bf7SJacob Keller } 52330deb0bf7SJacob Keller 52340deb0bf7SJacob Keller static int 52350deb0bf7SJacob Keller ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg) 52360deb0bf7SJacob Keller { 52370deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 52380deb0bf7SJacob Keller "Can't config promiscuous mode in switchdev mode for VF %d\n", 52390deb0bf7SJacob Keller vf->vf_id); 52400deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 52410deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 52420deb0bf7SJacob Keller NULL, 0); 52430deb0bf7SJacob Keller } 52440deb0bf7SJacob Keller 5245a7e11710SJacob Keller static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { 5246a7e11710SJacob Keller .get_ver_msg = ice_vc_get_ver_msg, 5247a7e11710SJacob Keller .get_vf_res_msg = ice_vc_get_vf_res_msg, 5248a7e11710SJacob Keller .reset_vf = ice_vc_reset_vf_msg, 5249a7e11710SJacob Keller .add_mac_addr_msg = ice_vc_repr_add_mac, 5250a7e11710SJacob Keller .del_mac_addr_msg = ice_vc_repr_del_mac, 5251a7e11710SJacob Keller .cfg_qs_msg = ice_vc_cfg_qs_msg, 5252a7e11710SJacob Keller .ena_qs_msg = ice_vc_ena_qs_msg, 5253a7e11710SJacob Keller .dis_qs_msg = ice_vc_dis_qs_msg, 5254a7e11710SJacob Keller .request_qs_msg = ice_vc_request_qs_msg, 5255a7e11710SJacob Keller .cfg_irq_map_msg = 
ice_vc_cfg_irq_map_msg, 5256a7e11710SJacob Keller .config_rss_key = ice_vc_config_rss_key, 5257a7e11710SJacob Keller .config_rss_lut = ice_vc_config_rss_lut, 5258a7e11710SJacob Keller .get_stats_msg = ice_vc_get_stats_msg, 5259a7e11710SJacob Keller .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, 5260a7e11710SJacob Keller .add_vlan_msg = ice_vc_repr_add_vlan, 5261a7e11710SJacob Keller .remove_vlan_msg = ice_vc_repr_del_vlan, 5262a7e11710SJacob Keller .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping, 5263a7e11710SJacob Keller .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping, 5264a7e11710SJacob Keller .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, 5265a7e11710SJacob Keller .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, 5266a7e11710SJacob Keller .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, 5267a7e11710SJacob Keller .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps, 5268a7e11710SJacob Keller .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg, 5269a7e11710SJacob Keller .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg, 5270a7e11710SJacob Keller .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg, 5271a7e11710SJacob Keller .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, 5272a7e11710SJacob Keller .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, 5273a7e11710SJacob Keller .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, 5274a7e11710SJacob Keller }; 5275a7e11710SJacob Keller 5276a7e11710SJacob Keller /** 5277a7e11710SJacob Keller * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops 5278a7e11710SJacob Keller * @vf: the VF to switch ops 5279a7e11710SJacob Keller */ 5280a7e11710SJacob Keller void ice_virtchnl_set_repr_ops(struct ice_vf *vf) 52810deb0bf7SJacob Keller { 5282a7e11710SJacob Keller vf->virtchnl_ops = &ice_virtchnl_repr_ops; 52830deb0bf7SJacob Keller } 52840deb0bf7SJacob Keller 52850deb0bf7SJacob Keller /** 52860deb0bf7SJacob Keller * ice_vc_process_vf_msg - Process request from VF 
52870deb0bf7SJacob Keller * @pf: pointer to the PF structure 52880deb0bf7SJacob Keller * @event: pointer to the AQ event 52890deb0bf7SJacob Keller * 52900deb0bf7SJacob Keller * called from the common asq/arq handler to 52910deb0bf7SJacob Keller * process request from VF 52920deb0bf7SJacob Keller */ 52930deb0bf7SJacob Keller void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) 52940deb0bf7SJacob Keller { 52950deb0bf7SJacob Keller u32 v_opcode = le32_to_cpu(event->desc.cookie_high); 52960deb0bf7SJacob Keller s16 vf_id = le16_to_cpu(event->desc.retval); 5297a7e11710SJacob Keller const struct ice_virtchnl_ops *ops; 52980deb0bf7SJacob Keller u16 msglen = event->msg_len; 52990deb0bf7SJacob Keller u8 *msg = event->msg_buf; 53000deb0bf7SJacob Keller struct ice_vf *vf = NULL; 53010deb0bf7SJacob Keller struct device *dev; 53020deb0bf7SJacob Keller int err = 0; 53030deb0bf7SJacob Keller 53040deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 53050deb0bf7SJacob Keller 53060deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 53070deb0bf7SJacob Keller if (!vf) { 53080deb0bf7SJacob Keller dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n", 53090deb0bf7SJacob Keller vf_id, v_opcode, msglen); 53100deb0bf7SJacob Keller return; 53110deb0bf7SJacob Keller } 53120deb0bf7SJacob Keller 53130deb0bf7SJacob Keller /* Check if VF is disabled. 
*/ 53140deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { 53150deb0bf7SJacob Keller err = -EPERM; 53160deb0bf7SJacob Keller goto error_handler; 53170deb0bf7SJacob Keller } 53180deb0bf7SJacob Keller 5319a7e11710SJacob Keller ops = vf->virtchnl_ops; 53200deb0bf7SJacob Keller 53210deb0bf7SJacob Keller /* Perform basic checks on the msg */ 53220deb0bf7SJacob Keller err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 53230deb0bf7SJacob Keller if (err) { 53240deb0bf7SJacob Keller if (err == VIRTCHNL_STATUS_ERR_PARAM) 53250deb0bf7SJacob Keller err = -EPERM; 53260deb0bf7SJacob Keller else 53270deb0bf7SJacob Keller err = -EINVAL; 53280deb0bf7SJacob Keller } 53290deb0bf7SJacob Keller 53300deb0bf7SJacob Keller if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { 53310deb0bf7SJacob Keller ice_vc_send_msg_to_vf(vf, v_opcode, 53320deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 53330deb0bf7SJacob Keller 0); 53340deb0bf7SJacob Keller ice_put_vf(vf); 53350deb0bf7SJacob Keller return; 53360deb0bf7SJacob Keller } 53370deb0bf7SJacob Keller 53380deb0bf7SJacob Keller error_handler: 53390deb0bf7SJacob Keller if (err) { 53400deb0bf7SJacob Keller ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, 53410deb0bf7SJacob Keller NULL, 0); 53420deb0bf7SJacob Keller dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", 53430deb0bf7SJacob Keller vf_id, v_opcode, msglen, err); 53440deb0bf7SJacob Keller ice_put_vf(vf); 53450deb0bf7SJacob Keller return; 53460deb0bf7SJacob Keller } 53470deb0bf7SJacob Keller 53480deb0bf7SJacob Keller /* VF is being configured in another context that triggers a VFR, so no 53490deb0bf7SJacob Keller * need to process this message 53500deb0bf7SJacob Keller */ 53510deb0bf7SJacob Keller if (!mutex_trylock(&vf->cfg_lock)) { 53520deb0bf7SJacob Keller dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", 53530deb0bf7SJacob 
Keller vf->vf_id); 53540deb0bf7SJacob Keller ice_put_vf(vf); 53550deb0bf7SJacob Keller return; 53560deb0bf7SJacob Keller } 53570deb0bf7SJacob Keller 53580deb0bf7SJacob Keller switch (v_opcode) { 53590deb0bf7SJacob Keller case VIRTCHNL_OP_VERSION: 53600deb0bf7SJacob Keller err = ops->get_ver_msg(vf, msg); 53610deb0bf7SJacob Keller break; 53620deb0bf7SJacob Keller case VIRTCHNL_OP_GET_VF_RESOURCES: 53630deb0bf7SJacob Keller err = ops->get_vf_res_msg(vf, msg); 53640deb0bf7SJacob Keller if (ice_vf_init_vlan_stripping(vf)) 53650deb0bf7SJacob Keller dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n", 53660deb0bf7SJacob Keller vf->vf_id); 53670deb0bf7SJacob Keller ice_vc_notify_vf_link_state(vf); 53680deb0bf7SJacob Keller break; 53690deb0bf7SJacob Keller case VIRTCHNL_OP_RESET_VF: 53700deb0bf7SJacob Keller ops->reset_vf(vf); 53710deb0bf7SJacob Keller break; 53720deb0bf7SJacob Keller case VIRTCHNL_OP_ADD_ETH_ADDR: 53730deb0bf7SJacob Keller err = ops->add_mac_addr_msg(vf, msg); 53740deb0bf7SJacob Keller break; 53750deb0bf7SJacob Keller case VIRTCHNL_OP_DEL_ETH_ADDR: 53760deb0bf7SJacob Keller err = ops->del_mac_addr_msg(vf, msg); 53770deb0bf7SJacob Keller break; 53780deb0bf7SJacob Keller case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 53790deb0bf7SJacob Keller err = ops->cfg_qs_msg(vf, msg); 53800deb0bf7SJacob Keller break; 53810deb0bf7SJacob Keller case VIRTCHNL_OP_ENABLE_QUEUES: 53820deb0bf7SJacob Keller err = ops->ena_qs_msg(vf, msg); 53830deb0bf7SJacob Keller ice_vc_notify_vf_link_state(vf); 53840deb0bf7SJacob Keller break; 53850deb0bf7SJacob Keller case VIRTCHNL_OP_DISABLE_QUEUES: 53860deb0bf7SJacob Keller err = ops->dis_qs_msg(vf, msg); 53870deb0bf7SJacob Keller break; 53880deb0bf7SJacob Keller case VIRTCHNL_OP_REQUEST_QUEUES: 53890deb0bf7SJacob Keller err = ops->request_qs_msg(vf, msg); 53900deb0bf7SJacob Keller break; 53910deb0bf7SJacob Keller case VIRTCHNL_OP_CONFIG_IRQ_MAP: 53920deb0bf7SJacob Keller err = ops->cfg_irq_map_msg(vf, msg); 53930deb0bf7SJacob Keller 
break; 53940deb0bf7SJacob Keller case VIRTCHNL_OP_CONFIG_RSS_KEY: 53950deb0bf7SJacob Keller err = ops->config_rss_key(vf, msg); 53960deb0bf7SJacob Keller break; 53970deb0bf7SJacob Keller case VIRTCHNL_OP_CONFIG_RSS_LUT: 53980deb0bf7SJacob Keller err = ops->config_rss_lut(vf, msg); 53990deb0bf7SJacob Keller break; 54000deb0bf7SJacob Keller case VIRTCHNL_OP_GET_STATS: 54010deb0bf7SJacob Keller err = ops->get_stats_msg(vf, msg); 54020deb0bf7SJacob Keller break; 54030deb0bf7SJacob Keller case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 54040deb0bf7SJacob Keller err = ops->cfg_promiscuous_mode_msg(vf, msg); 54050deb0bf7SJacob Keller break; 54060deb0bf7SJacob Keller case VIRTCHNL_OP_ADD_VLAN: 54070deb0bf7SJacob Keller err = ops->add_vlan_msg(vf, msg); 54080deb0bf7SJacob Keller break; 54090deb0bf7SJacob Keller case VIRTCHNL_OP_DEL_VLAN: 54100deb0bf7SJacob Keller err = ops->remove_vlan_msg(vf, msg); 54110deb0bf7SJacob Keller break; 54120deb0bf7SJacob Keller case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 54130deb0bf7SJacob Keller err = ops->ena_vlan_stripping(vf); 54140deb0bf7SJacob Keller break; 54150deb0bf7SJacob Keller case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 54160deb0bf7SJacob Keller err = ops->dis_vlan_stripping(vf); 54170deb0bf7SJacob Keller break; 54180deb0bf7SJacob Keller case VIRTCHNL_OP_ADD_FDIR_FILTER: 54190deb0bf7SJacob Keller err = ops->add_fdir_fltr_msg(vf, msg); 54200deb0bf7SJacob Keller break; 54210deb0bf7SJacob Keller case VIRTCHNL_OP_DEL_FDIR_FILTER: 54220deb0bf7SJacob Keller err = ops->del_fdir_fltr_msg(vf, msg); 54230deb0bf7SJacob Keller break; 54240deb0bf7SJacob Keller case VIRTCHNL_OP_ADD_RSS_CFG: 54250deb0bf7SJacob Keller err = ops->handle_rss_cfg_msg(vf, msg, true); 54260deb0bf7SJacob Keller break; 54270deb0bf7SJacob Keller case VIRTCHNL_OP_DEL_RSS_CFG: 54280deb0bf7SJacob Keller err = ops->handle_rss_cfg_msg(vf, msg, false); 54290deb0bf7SJacob Keller break; 54300deb0bf7SJacob Keller case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: 54310deb0bf7SJacob Keller err = 
ops->get_offload_vlan_v2_caps(vf); 54320deb0bf7SJacob Keller break; 54330deb0bf7SJacob Keller case VIRTCHNL_OP_ADD_VLAN_V2: 54340deb0bf7SJacob Keller err = ops->add_vlan_v2_msg(vf, msg); 54350deb0bf7SJacob Keller break; 54360deb0bf7SJacob Keller case VIRTCHNL_OP_DEL_VLAN_V2: 54370deb0bf7SJacob Keller err = ops->remove_vlan_v2_msg(vf, msg); 54380deb0bf7SJacob Keller break; 54390deb0bf7SJacob Keller case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 54400deb0bf7SJacob Keller err = ops->ena_vlan_stripping_v2_msg(vf, msg); 54410deb0bf7SJacob Keller break; 54420deb0bf7SJacob Keller case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 54430deb0bf7SJacob Keller err = ops->dis_vlan_stripping_v2_msg(vf, msg); 54440deb0bf7SJacob Keller break; 54450deb0bf7SJacob Keller case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 54460deb0bf7SJacob Keller err = ops->ena_vlan_insertion_v2_msg(vf, msg); 54470deb0bf7SJacob Keller break; 54480deb0bf7SJacob Keller case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 54490deb0bf7SJacob Keller err = ops->dis_vlan_insertion_v2_msg(vf, msg); 54500deb0bf7SJacob Keller break; 54510deb0bf7SJacob Keller case VIRTCHNL_OP_UNKNOWN: 54520deb0bf7SJacob Keller default: 54530deb0bf7SJacob Keller dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, 54540deb0bf7SJacob Keller vf_id); 54550deb0bf7SJacob Keller err = ice_vc_send_msg_to_vf(vf, v_opcode, 54560deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 54570deb0bf7SJacob Keller NULL, 0); 54580deb0bf7SJacob Keller break; 54590deb0bf7SJacob Keller } 54600deb0bf7SJacob Keller if (err) { 54610deb0bf7SJacob Keller /* Helper function cares less about error return values here 54620deb0bf7SJacob Keller * as it is busy with pending work. 
54630deb0bf7SJacob Keller */ 54640deb0bf7SJacob Keller dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", 54650deb0bf7SJacob Keller vf_id, v_opcode, err); 54660deb0bf7SJacob Keller } 54670deb0bf7SJacob Keller 54680deb0bf7SJacob Keller mutex_unlock(&vf->cfg_lock); 54690deb0bf7SJacob Keller ice_put_vf(vf); 54700deb0bf7SJacob Keller } 54710deb0bf7SJacob Keller 54720deb0bf7SJacob Keller /** 54730deb0bf7SJacob Keller * ice_get_vf_cfg 54740deb0bf7SJacob Keller * @netdev: network interface device structure 54750deb0bf7SJacob Keller * @vf_id: VF identifier 54760deb0bf7SJacob Keller * @ivi: VF configuration structure 54770deb0bf7SJacob Keller * 54780deb0bf7SJacob Keller * return VF configuration 54790deb0bf7SJacob Keller */ 54800deb0bf7SJacob Keller int 54810deb0bf7SJacob Keller ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) 54820deb0bf7SJacob Keller { 54830deb0bf7SJacob Keller struct ice_pf *pf = ice_netdev_to_pf(netdev); 54840deb0bf7SJacob Keller struct ice_vf *vf; 54850deb0bf7SJacob Keller int ret; 54860deb0bf7SJacob Keller 54870deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 54880deb0bf7SJacob Keller if (!vf) 54890deb0bf7SJacob Keller return -EINVAL; 54900deb0bf7SJacob Keller 54910deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 54920deb0bf7SJacob Keller if (ret) 54930deb0bf7SJacob Keller goto out_put_vf; 54940deb0bf7SJacob Keller 54950deb0bf7SJacob Keller ivi->vf = vf_id; 54960deb0bf7SJacob Keller ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); 54970deb0bf7SJacob Keller 54980deb0bf7SJacob Keller /* VF configuration for VLAN and applicable QoS */ 54990deb0bf7SJacob Keller ivi->vlan = ice_vf_get_port_vlan_id(vf); 55000deb0bf7SJacob Keller ivi->qos = ice_vf_get_port_vlan_prio(vf); 55010deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 55020deb0bf7SJacob Keller ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf)); 55030deb0bf7SJacob Keller 55040deb0bf7SJacob Keller ivi->trusted = vf->trusted; 
55050deb0bf7SJacob Keller ivi->spoofchk = vf->spoofchk; 55060deb0bf7SJacob Keller if (!vf->link_forced) 55070deb0bf7SJacob Keller ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; 55080deb0bf7SJacob Keller else if (vf->link_up) 55090deb0bf7SJacob Keller ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 55100deb0bf7SJacob Keller else 55110deb0bf7SJacob Keller ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 55120deb0bf7SJacob Keller ivi->max_tx_rate = vf->max_tx_rate; 55130deb0bf7SJacob Keller ivi->min_tx_rate = vf->min_tx_rate; 55140deb0bf7SJacob Keller 55150deb0bf7SJacob Keller out_put_vf: 55160deb0bf7SJacob Keller ice_put_vf(vf); 55170deb0bf7SJacob Keller return ret; 55180deb0bf7SJacob Keller } 55190deb0bf7SJacob Keller 55200deb0bf7SJacob Keller /** 55210deb0bf7SJacob Keller * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch 55220deb0bf7SJacob Keller * @pf: PF used to reference the switch's rules 55230deb0bf7SJacob Keller * @umac: unicast MAC to compare against existing switch rules 55240deb0bf7SJacob Keller * 55250deb0bf7SJacob Keller * Return true on the first/any match, else return false 55260deb0bf7SJacob Keller */ 55270deb0bf7SJacob Keller static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) 55280deb0bf7SJacob Keller { 55290deb0bf7SJacob Keller struct ice_sw_recipe *mac_recipe_list = 55300deb0bf7SJacob Keller &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; 55310deb0bf7SJacob Keller struct ice_fltr_mgmt_list_entry *list_itr; 55320deb0bf7SJacob Keller struct list_head *rule_head; 55330deb0bf7SJacob Keller struct mutex *rule_lock; /* protect MAC filter list access */ 55340deb0bf7SJacob Keller 55350deb0bf7SJacob Keller rule_head = &mac_recipe_list->filt_rules; 55360deb0bf7SJacob Keller rule_lock = &mac_recipe_list->filt_rule_lock; 55370deb0bf7SJacob Keller 55380deb0bf7SJacob Keller mutex_lock(rule_lock); 55390deb0bf7SJacob Keller list_for_each_entry(list_itr, rule_head, list_entry) { 55400deb0bf7SJacob Keller u8 *existing_mac = 
&list_itr->fltr_info.l_data.mac.mac_addr[0]; 55410deb0bf7SJacob Keller 55420deb0bf7SJacob Keller if (ether_addr_equal(existing_mac, umac)) { 55430deb0bf7SJacob Keller mutex_unlock(rule_lock); 55440deb0bf7SJacob Keller return true; 55450deb0bf7SJacob Keller } 55460deb0bf7SJacob Keller } 55470deb0bf7SJacob Keller 55480deb0bf7SJacob Keller mutex_unlock(rule_lock); 55490deb0bf7SJacob Keller 55500deb0bf7SJacob Keller return false; 55510deb0bf7SJacob Keller } 55520deb0bf7SJacob Keller 55530deb0bf7SJacob Keller /** 55540deb0bf7SJacob Keller * ice_set_vf_mac 55550deb0bf7SJacob Keller * @netdev: network interface device structure 55560deb0bf7SJacob Keller * @vf_id: VF identifier 55570deb0bf7SJacob Keller * @mac: MAC address 55580deb0bf7SJacob Keller * 55590deb0bf7SJacob Keller * program VF MAC address 55600deb0bf7SJacob Keller */ 55610deb0bf7SJacob Keller int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 55620deb0bf7SJacob Keller { 55630deb0bf7SJacob Keller struct ice_pf *pf = ice_netdev_to_pf(netdev); 55640deb0bf7SJacob Keller struct ice_vf *vf; 55650deb0bf7SJacob Keller int ret; 55660deb0bf7SJacob Keller 55670deb0bf7SJacob Keller if (is_multicast_ether_addr(mac)) { 55680deb0bf7SJacob Keller netdev_err(netdev, "%pM not a valid unicast address\n", mac); 55690deb0bf7SJacob Keller return -EINVAL; 55700deb0bf7SJacob Keller } 55710deb0bf7SJacob Keller 55720deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 55730deb0bf7SJacob Keller if (!vf) 55740deb0bf7SJacob Keller return -EINVAL; 55750deb0bf7SJacob Keller 55760deb0bf7SJacob Keller /* nothing left to do, unicast MAC already set */ 55770deb0bf7SJacob Keller if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && 55780deb0bf7SJacob Keller ether_addr_equal(vf->hw_lan_addr.addr, mac)) { 55790deb0bf7SJacob Keller ret = 0; 55800deb0bf7SJacob Keller goto out_put_vf; 55810deb0bf7SJacob Keller } 55820deb0bf7SJacob Keller 55830deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 55840deb0bf7SJacob Keller if (ret) 
55850deb0bf7SJacob Keller goto out_put_vf; 55860deb0bf7SJacob Keller 55870deb0bf7SJacob Keller if (ice_unicast_mac_exists(pf, mac)) { 55880deb0bf7SJacob Keller netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n", 55890deb0bf7SJacob Keller mac, vf_id, mac); 55900deb0bf7SJacob Keller ret = -EINVAL; 55910deb0bf7SJacob Keller goto out_put_vf; 55920deb0bf7SJacob Keller } 55930deb0bf7SJacob Keller 55940deb0bf7SJacob Keller mutex_lock(&vf->cfg_lock); 55950deb0bf7SJacob Keller 55960deb0bf7SJacob Keller /* VF is notified of its new MAC via the PF's response to the 55970deb0bf7SJacob Keller * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset 55980deb0bf7SJacob Keller */ 55990deb0bf7SJacob Keller ether_addr_copy(vf->dev_lan_addr.addr, mac); 56000deb0bf7SJacob Keller ether_addr_copy(vf->hw_lan_addr.addr, mac); 56010deb0bf7SJacob Keller if (is_zero_ether_addr(mac)) { 56020deb0bf7SJacob Keller /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ 56030deb0bf7SJacob Keller vf->pf_set_mac = false; 56040deb0bf7SJacob Keller netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", 56050deb0bf7SJacob Keller vf->vf_id); 56060deb0bf7SJacob Keller } else { 56070deb0bf7SJacob Keller /* PF will add MAC rule for the VF */ 56080deb0bf7SJacob Keller vf->pf_set_mac = true; 56090deb0bf7SJacob Keller netdev_info(netdev, "Setting MAC %pM on VF %d. 
VF driver will be reinitialized\n",
			 mac, vf_id);
	}

	/* Apply the new MAC address by restarting the VF; the reset is
	 * serialized against other configuration paths by cfg_lock.
	 */
	ice_vc_reset_vf(vf);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 *
 * Return: 0 on success (including a no-op duplicate request), -EOPNOTSUPP in
 * switchdev mode, -EINVAL for an unknown VF ID, or the error from
 * ice_check_vf_ready_for_cfg().
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	/* trusted VFs are not allowed when eswitch is in switchdev mode */
	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	/* takes a reference on the VF; released via ice_put_vf() below */
	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	/* trust change only takes effect after a VF reset */
	vf->trusted = trusted;
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 *
 * Return: 0 on success, -EINVAL for an unknown VF ID or link state, or the
 * error from ice_check_vf_ready_for_cfg().
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* follow the physical link state */
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	/* tell the VF about its (possibly forced) link state */
	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 *
 * Sums vf->min_tx_rate over all VFs under RCU protection; the result is a
 * snapshot and may be stale by the time the caller uses it.
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of total
 * min_tx_rate based on the current link speed and all other VFs configured
 * min_tx_rate
57400deb0bf7SJacob Keller * 57410deb0bf7SJacob Keller * Return true if the passed min_tx_rate would cause oversubscription, else 57420deb0bf7SJacob Keller * return false 57430deb0bf7SJacob Keller */ 57440deb0bf7SJacob Keller static bool 57450deb0bf7SJacob Keller ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) 57460deb0bf7SJacob Keller { 57470deb0bf7SJacob Keller int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf)); 57480deb0bf7SJacob Keller int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); 57490deb0bf7SJacob Keller 57500deb0bf7SJacob Keller /* this VF's previous rate is being overwritten */ 57510deb0bf7SJacob Keller all_vfs_min_tx_rate -= vf->min_tx_rate; 57520deb0bf7SJacob Keller 57530deb0bf7SJacob Keller if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { 57540deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", 57550deb0bf7SJacob Keller min_tx_rate, vf->vf_id, 57560deb0bf7SJacob Keller all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, 57570deb0bf7SJacob Keller link_speed_mbps); 57580deb0bf7SJacob Keller return true; 57590deb0bf7SJacob Keller } 57600deb0bf7SJacob Keller 57610deb0bf7SJacob Keller return false; 57620deb0bf7SJacob Keller } 57630deb0bf7SJacob Keller 57640deb0bf7SJacob Keller /** 57650deb0bf7SJacob Keller * ice_set_vf_bw - set min/max VF bandwidth 57660deb0bf7SJacob Keller * @netdev: network interface device structure 57670deb0bf7SJacob Keller * @vf_id: VF identifier 57680deb0bf7SJacob Keller * @min_tx_rate: Minimum Tx rate in Mbps 57690deb0bf7SJacob Keller * @max_tx_rate: Maximum Tx rate in Mbps 57700deb0bf7SJacob Keller */ 57710deb0bf7SJacob Keller int 57720deb0bf7SJacob Keller ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 57730deb0bf7SJacob Keller int max_tx_rate) 57740deb0bf7SJacob Keller { 57750deb0bf7SJacob Keller struct ice_pf *pf = 
ice_netdev_to_pf(netdev); 57760deb0bf7SJacob Keller struct ice_vsi *vsi; 57770deb0bf7SJacob Keller struct device *dev; 57780deb0bf7SJacob Keller struct ice_vf *vf; 57790deb0bf7SJacob Keller int ret; 57800deb0bf7SJacob Keller 57810deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 57820deb0bf7SJacob Keller 57830deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 57840deb0bf7SJacob Keller if (!vf) 57850deb0bf7SJacob Keller return -EINVAL; 57860deb0bf7SJacob Keller 57870deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 57880deb0bf7SJacob Keller if (ret) 57890deb0bf7SJacob Keller goto out_put_vf; 57900deb0bf7SJacob Keller 57910deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 57920deb0bf7SJacob Keller 57930deb0bf7SJacob Keller /* when max_tx_rate is zero that means no max Tx rate limiting, so only 57940deb0bf7SJacob Keller * check if max_tx_rate is non-zero 57950deb0bf7SJacob Keller */ 57960deb0bf7SJacob Keller if (max_tx_rate && min_tx_rate > max_tx_rate) { 57970deb0bf7SJacob Keller dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", 57980deb0bf7SJacob Keller min_tx_rate, max_tx_rate); 57990deb0bf7SJacob Keller ret = -EINVAL; 58000deb0bf7SJacob Keller goto out_put_vf; 58010deb0bf7SJacob Keller } 58020deb0bf7SJacob Keller 58030deb0bf7SJacob Keller if (min_tx_rate && ice_is_dcb_active(pf)) { 58040deb0bf7SJacob Keller dev_err(dev, "DCB on PF is currently enabled. 
VF min Tx rate limiting not allowed on this PF.\n"); 58050deb0bf7SJacob Keller ret = -EOPNOTSUPP; 58060deb0bf7SJacob Keller goto out_put_vf; 58070deb0bf7SJacob Keller } 58080deb0bf7SJacob Keller 58090deb0bf7SJacob Keller if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { 58100deb0bf7SJacob Keller ret = -EINVAL; 58110deb0bf7SJacob Keller goto out_put_vf; 58120deb0bf7SJacob Keller } 58130deb0bf7SJacob Keller 58140deb0bf7SJacob Keller if (vf->min_tx_rate != (unsigned int)min_tx_rate) { 58150deb0bf7SJacob Keller ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); 58160deb0bf7SJacob Keller if (ret) { 58170deb0bf7SJacob Keller dev_err(dev, "Unable to set min-tx-rate for VF %d\n", 58180deb0bf7SJacob Keller vf->vf_id); 58190deb0bf7SJacob Keller goto out_put_vf; 58200deb0bf7SJacob Keller } 58210deb0bf7SJacob Keller 58220deb0bf7SJacob Keller vf->min_tx_rate = min_tx_rate; 58230deb0bf7SJacob Keller } 58240deb0bf7SJacob Keller 58250deb0bf7SJacob Keller if (vf->max_tx_rate != (unsigned int)max_tx_rate) { 58260deb0bf7SJacob Keller ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); 58270deb0bf7SJacob Keller if (ret) { 58280deb0bf7SJacob Keller dev_err(dev, "Unable to set max-tx-rate for VF %d\n", 58290deb0bf7SJacob Keller vf->vf_id); 58300deb0bf7SJacob Keller goto out_put_vf; 58310deb0bf7SJacob Keller } 58320deb0bf7SJacob Keller 58330deb0bf7SJacob Keller vf->max_tx_rate = max_tx_rate; 58340deb0bf7SJacob Keller } 58350deb0bf7SJacob Keller 58360deb0bf7SJacob Keller out_put_vf: 58370deb0bf7SJacob Keller ice_put_vf(vf); 58380deb0bf7SJacob Keller return ret; 58390deb0bf7SJacob Keller } 58400deb0bf7SJacob Keller 58410deb0bf7SJacob Keller /** 58420deb0bf7SJacob Keller * ice_get_vf_stats - populate some stats for the VF 58430deb0bf7SJacob Keller * @netdev: the netdev of the PF 58440deb0bf7SJacob Keller * @vf_id: the host OS identifier (0-255) 58450deb0bf7SJacob Keller * @vf_stats: pointer to the OS memory to be initialized 58460deb0bf7SJacob Keller */ 58470deb0bf7SJacob 
Keller int ice_get_vf_stats(struct net_device *netdev, int vf_id, 58480deb0bf7SJacob Keller struct ifla_vf_stats *vf_stats) 58490deb0bf7SJacob Keller { 58500deb0bf7SJacob Keller struct ice_pf *pf = ice_netdev_to_pf(netdev); 58510deb0bf7SJacob Keller struct ice_eth_stats *stats; 58520deb0bf7SJacob Keller struct ice_vsi *vsi; 58530deb0bf7SJacob Keller struct ice_vf *vf; 58540deb0bf7SJacob Keller int ret; 58550deb0bf7SJacob Keller 58560deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 58570deb0bf7SJacob Keller if (!vf) 58580deb0bf7SJacob Keller return -EINVAL; 58590deb0bf7SJacob Keller 58600deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 58610deb0bf7SJacob Keller if (ret) 58620deb0bf7SJacob Keller goto out_put_vf; 58630deb0bf7SJacob Keller 58640deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 58650deb0bf7SJacob Keller if (!vsi) { 58660deb0bf7SJacob Keller ret = -EINVAL; 58670deb0bf7SJacob Keller goto out_put_vf; 58680deb0bf7SJacob Keller } 58690deb0bf7SJacob Keller 58700deb0bf7SJacob Keller ice_update_eth_stats(vsi); 58710deb0bf7SJacob Keller stats = &vsi->eth_stats; 58720deb0bf7SJacob Keller 58730deb0bf7SJacob Keller memset(vf_stats, 0, sizeof(*vf_stats)); 58740deb0bf7SJacob Keller 58750deb0bf7SJacob Keller vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 58760deb0bf7SJacob Keller stats->rx_multicast; 58770deb0bf7SJacob Keller vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 58780deb0bf7SJacob Keller stats->tx_multicast; 58790deb0bf7SJacob Keller vf_stats->rx_bytes = stats->rx_bytes; 58800deb0bf7SJacob Keller vf_stats->tx_bytes = stats->tx_bytes; 58810deb0bf7SJacob Keller vf_stats->broadcast = stats->rx_broadcast; 58820deb0bf7SJacob Keller vf_stats->multicast = stats->rx_multicast; 58830deb0bf7SJacob Keller vf_stats->rx_dropped = stats->rx_discards; 58840deb0bf7SJacob Keller vf_stats->tx_dropped = stats->tx_discards; 58850deb0bf7SJacob Keller 58860deb0bf7SJacob Keller out_put_vf: 58870deb0bf7SJacob Keller ice_put_vf(vf); 
58880deb0bf7SJacob Keller return ret; 58890deb0bf7SJacob Keller } 58900deb0bf7SJacob Keller 58910deb0bf7SJacob Keller /** 5892346f7aa3SJacob Keller * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported 5893346f7aa3SJacob Keller * @hw: hardware structure used to check the VLAN mode 5894346f7aa3SJacob Keller * @vlan_proto: VLAN TPID being checked 5895346f7aa3SJacob Keller * 5896346f7aa3SJacob Keller * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q 5897346f7aa3SJacob Keller * and ETH_P_8021AD are supported. If the device is configured in Single VLAN 5898346f7aa3SJacob Keller * Mode (SVM), then only ETH_P_8021Q is supported. 5899346f7aa3SJacob Keller */ 5900346f7aa3SJacob Keller static bool 5901346f7aa3SJacob Keller ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto) 5902346f7aa3SJacob Keller { 5903346f7aa3SJacob Keller bool is_supported = false; 5904346f7aa3SJacob Keller 5905346f7aa3SJacob Keller switch (vlan_proto) { 5906346f7aa3SJacob Keller case ETH_P_8021Q: 5907346f7aa3SJacob Keller is_supported = true; 5908346f7aa3SJacob Keller break; 5909346f7aa3SJacob Keller case ETH_P_8021AD: 5910346f7aa3SJacob Keller if (ice_is_dvm_ena(hw)) 5911346f7aa3SJacob Keller is_supported = true; 5912346f7aa3SJacob Keller break; 5913346f7aa3SJacob Keller } 5914346f7aa3SJacob Keller 5915346f7aa3SJacob Keller return is_supported; 5916346f7aa3SJacob Keller } 5917346f7aa3SJacob Keller 5918346f7aa3SJacob Keller /** 5919346f7aa3SJacob Keller * ice_set_vf_port_vlan 5920346f7aa3SJacob Keller * @netdev: network interface device structure 5921346f7aa3SJacob Keller * @vf_id: VF identifier 5922346f7aa3SJacob Keller * @vlan_id: VLAN ID being set 5923346f7aa3SJacob Keller * @qos: priority setting 5924346f7aa3SJacob Keller * @vlan_proto: VLAN protocol 5925346f7aa3SJacob Keller * 5926346f7aa3SJacob Keller * program VF Port VLAN ID and/or QoS 5927346f7aa3SJacob Keller */ 5928346f7aa3SJacob Keller int 5929346f7aa3SJacob Keller 
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	/* VLAN ID must be < 4096 and 802.1p priority fits in 3 bits */
	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	/* only 0x8100, or 0x88a8 when the device is in DVM, is accepted */
	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	/* vlan_id == 0 with qos == 0 encodes "clear port VLAN" */
	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	/* new port VLAN config is applied by resetting the VF */
	ice_vc_reset_vf(vf);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ?
		 "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr.addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		/* read the VF device ID from the SR-IOV capability */
		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		/* walk every matching VF device, but only restore MSI state
		 * for VFs that belong to this PF
		 */
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			/* pci_get_device() drops the ref on the previous
			 * device and takes one on the next
			 */
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}

/**
 * ice_is_malicious_vf - helper function to detect a malicious VF
 * @pf: ptr to struct ice_pf
 * @event: pointer to the AQ event
 * @num_msg_proc: the number of messages processed so far
 * @num_msg_pending: the number of messages pending in admin queue
 */
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
		    u16 num_msg_proc, u16 num_msg_pending)
{
	/* the mailbox descriptor's retval field carries the source VF ID */
	s16 vf_id = le16_to_cpu(event->desc.retval);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_mbx_data mbxdata;
	bool malvf = false;
	struct ice_vf *vf;
	int status;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return false;

	/* a disabled VF cannot be the source of mailbox overflow */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		goto out_put_vf;

	mbxdata.num_msg_proc = num_msg_proc;
	mbxdata.num_pending_arq = num_msg_pending;
	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
#define ICE_MBX_OVERFLOW_WATERMARK 64
	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

	/* check to see if we have a malicious VF */
	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
	if (status)
		goto out_put_vf;

	if (malvf) {
		bool report_vf = false;

		/* if the VF is malicious and we haven't let the user
		 * know about it, then let them know now
		 */
		status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
					      ICE_MAX_SRIOV_VFS, vf_id,
					      &report_vf);
		if (status)
			dev_dbg(dev, "Error reporting malicious VF\n");

		if (report_vf) {
			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

			if (pf_vsi)
				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
					 &vf->dev_lan_addr.addr[0],
					 pf_vsi->netdev->dev_addr);
		}
	}

out_put_vf:
	ice_put_vf(vf);
	return malvf;
}