// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/* Convert a virtchnl protocol-header field enum into the single-bit
 * selector used in the vc_hash_field tables below.
 */
#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)

/* Maps one virtchnl protocol header type onto the equivalent ice flow
 * segment header flag(s).
 */
struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};

static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	/* Both outer (S) and inner (C) VLAN tags map to the same ice
	 * VLAN segment header.
	 */
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};

/* Maps a (virtchnl header, virtchnl hash-field selector) pair onto the
 * equivalent ice hash-field bitmap used to program RSS.
 */
struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
{VIRTCHNL_PROTO_HDR_GTPU_IP, 1550deb0bf7SJacob Keller FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID), 1560deb0bf7SJacob Keller BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)}, 1570deb0bf7SJacob Keller {VIRTCHNL_PROTO_HDR_L2TPV3, 1580deb0bf7SJacob Keller FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID), 1590deb0bf7SJacob Keller BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)}, 1600deb0bf7SJacob Keller {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI), 1610deb0bf7SJacob Keller BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)}, 1620deb0bf7SJacob Keller {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI), 1630deb0bf7SJacob Keller BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)}, 1640deb0bf7SJacob Keller {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), 1650deb0bf7SJacob Keller BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)}, 1660deb0bf7SJacob Keller }; 1670deb0bf7SJacob Keller 1680deb0bf7SJacob Keller /** 1690deb0bf7SJacob Keller * ice_get_vf_vsi - get VF's VSI based on the stored index 1700deb0bf7SJacob Keller * @vf: VF used to get VSI 1710deb0bf7SJacob Keller */ 1720deb0bf7SJacob Keller struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) 1730deb0bf7SJacob Keller { 1740deb0bf7SJacob Keller return vf->pf->vsi[vf->lan_vsi_idx]; 1750deb0bf7SJacob Keller } 1760deb0bf7SJacob Keller 1770deb0bf7SJacob Keller /** 1780deb0bf7SJacob Keller * ice_get_vf_by_id - Get pointer to VF by ID 1790deb0bf7SJacob Keller * @pf: the PF private structure 1800deb0bf7SJacob Keller * @vf_id: the VF ID to locate 1810deb0bf7SJacob Keller * 1820deb0bf7SJacob Keller * Locate and return a pointer to the VF structure associated with a given ID. 1830deb0bf7SJacob Keller * Returns NULL if the ID does not have a valid VF structure associated with 1840deb0bf7SJacob Keller * it. 
1850deb0bf7SJacob Keller * 1860deb0bf7SJacob Keller * This function takes a reference to the VF, which must be released by 1870deb0bf7SJacob Keller * calling ice_put_vf() once the caller is finished accessing the VF structure 1880deb0bf7SJacob Keller * returned. 1890deb0bf7SJacob Keller */ 1900deb0bf7SJacob Keller struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) 1910deb0bf7SJacob Keller { 1920deb0bf7SJacob Keller struct ice_vf *vf; 1930deb0bf7SJacob Keller 1940deb0bf7SJacob Keller rcu_read_lock(); 1950deb0bf7SJacob Keller hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) { 1960deb0bf7SJacob Keller if (vf->vf_id == vf_id) { 1970deb0bf7SJacob Keller struct ice_vf *found; 1980deb0bf7SJacob Keller 1990deb0bf7SJacob Keller if (kref_get_unless_zero(&vf->refcnt)) 2000deb0bf7SJacob Keller found = vf; 2010deb0bf7SJacob Keller else 2020deb0bf7SJacob Keller found = NULL; 2030deb0bf7SJacob Keller 2040deb0bf7SJacob Keller rcu_read_unlock(); 2050deb0bf7SJacob Keller return found; 2060deb0bf7SJacob Keller } 2070deb0bf7SJacob Keller } 2080deb0bf7SJacob Keller rcu_read_unlock(); 2090deb0bf7SJacob Keller 2100deb0bf7SJacob Keller return NULL; 2110deb0bf7SJacob Keller } 2120deb0bf7SJacob Keller 2130deb0bf7SJacob Keller /** 2140deb0bf7SJacob Keller * ice_release_vf - Release VF associated with a refcount 2150deb0bf7SJacob Keller * @ref: the kref decremented to zero 2160deb0bf7SJacob Keller * 2170deb0bf7SJacob Keller * Callback function for kref_put to release a VF once its reference count has 2180deb0bf7SJacob Keller * hit zero. 
 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* No one can hold cfg_lock anymore: the last reference is gone */
	mutex_destroy(&vf->cfg_lock);

	/* Free only after an RCU grace period so concurrent RCU hash
	 * walkers (e.g. ice_get_vf_by_id) can finish with this entry.
	 */
	kfree_rcu(vf, rcu);
}

/**
 * ice_put_vf - Release a reference to a VF
 * @vf: the VF structure to decrease reference count on
 *
 * This must be called after ice_get_vf_by_id() once the reference to the VF
 * structure is no longer used. Otherwise, the VF structure will never be
 * freed.
 */
void ice_put_vf(struct ice_vf *vf)
{
	kref_put(&vf->refcnt, ice_release_vf);
}

/**
 * ice_has_vfs - Return true if the PF has any associated VFs
 * @pf: the PF private structure
 *
 * Return whether or not the PF has any allocated VFs.
 *
 * Note that this function only guarantees that there are no VFs at the point
 * of calling it. It does not guarantee that no more VFs will be added.
 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock.
	 */
	return !hash_empty(pf->vfs.table);
}

/**
 * ice_get_num_vfs - Get number of allocated VFs
 * @pf: the PF private structure
 *
 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 * the output of this function.
 */
u16 ice_get_num_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	u16 num_vfs = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		num_vfs++;
	rcu_read_unlock();

	return num_vfs;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 *
 * Return: 0 when the VF has finished initialization, -EBUSY while the VF is
 * still resetting (ICE_VF_STATE_INIT not yet set).
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 *
 * Link is reported down while the VF is still initializing or has no
 * enabled queues; a forced link setting overrides the physical link state.
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed,
				 true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	/* Best effort: the send result is intentionally ignored */
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(ice_get_vf_vsi(vf));
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI
is being released on
 */
static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 *
 * Releases the control and LAN VSIs (when allocated), clears MDD event
 * bookkeeping, and disables the VF's interrupt vectors.
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable a VF's interrupt and queue mappings
 * @vf: pointer to the VF structure
 *
 * Clears the VPINT allocation registers, returns the VF's interrupt
 * vectors to the PF, and clears the Tx/Rx queue base mappings.
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		/* point each of the VF's vectors back at the PF */
		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker then just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
6090deb0bf7SJacob Keller } 6100deb0bf7SJacob Keller 6110deb0bf7SJacob Keller /** 6120deb0bf7SJacob Keller * ice_free_vfs - Free all VFs 6130deb0bf7SJacob Keller * @pf: pointer to the PF structure 6140deb0bf7SJacob Keller */ 6150deb0bf7SJacob Keller void ice_free_vfs(struct ice_pf *pf) 6160deb0bf7SJacob Keller { 6170deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 6180deb0bf7SJacob Keller struct ice_vfs *vfs = &pf->vfs; 6190deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 6200deb0bf7SJacob Keller struct ice_vf *vf; 6210deb0bf7SJacob Keller unsigned int bkt; 6220deb0bf7SJacob Keller 6230deb0bf7SJacob Keller if (!ice_has_vfs(pf)) 6240deb0bf7SJacob Keller return; 6250deb0bf7SJacob Keller 6260deb0bf7SJacob Keller while (test_and_set_bit(ICE_VF_DIS, pf->state)) 6270deb0bf7SJacob Keller usleep_range(1000, 2000); 6280deb0bf7SJacob Keller 6290deb0bf7SJacob Keller /* Disable IOV before freeing resources. This lets any VF drivers 6300deb0bf7SJacob Keller * running in the host get themselves cleaned up before we yank 6310deb0bf7SJacob Keller * the carpet out from underneath their feet. 
6320deb0bf7SJacob Keller */ 6330deb0bf7SJacob Keller if (!pci_vfs_assigned(pf->pdev)) 6340deb0bf7SJacob Keller pci_disable_sriov(pf->pdev); 6350deb0bf7SJacob Keller else 6360deb0bf7SJacob Keller dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); 6370deb0bf7SJacob Keller 6380deb0bf7SJacob Keller mutex_lock(&vfs->table_lock); 6390deb0bf7SJacob Keller 6400deb0bf7SJacob Keller ice_eswitch_release(pf); 6410deb0bf7SJacob Keller 6420deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 6430deb0bf7SJacob Keller mutex_lock(&vf->cfg_lock); 6440deb0bf7SJacob Keller 6450deb0bf7SJacob Keller ice_dis_vf_qs(vf); 6460deb0bf7SJacob Keller 6470deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { 6480deb0bf7SJacob Keller /* disable VF qp mappings and set VF disable state */ 6490deb0bf7SJacob Keller ice_dis_vf_mappings(vf); 6500deb0bf7SJacob Keller set_bit(ICE_VF_STATE_DIS, vf->vf_states); 6510deb0bf7SJacob Keller ice_free_vf_res(vf); 6520deb0bf7SJacob Keller } 6530deb0bf7SJacob Keller 6540deb0bf7SJacob Keller if (!pci_vfs_assigned(pf->pdev)) { 6550deb0bf7SJacob Keller u32 reg_idx, bit_idx; 6560deb0bf7SJacob Keller 6570deb0bf7SJacob Keller reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; 6580deb0bf7SJacob Keller bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; 6590deb0bf7SJacob Keller wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 6600deb0bf7SJacob Keller } 6610deb0bf7SJacob Keller 6620deb0bf7SJacob Keller /* clear malicious info since the VF is getting released */ 6630deb0bf7SJacob Keller if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, 6640deb0bf7SJacob Keller ICE_MAX_VF_COUNT, vf->vf_id)) 6650deb0bf7SJacob Keller dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", 6660deb0bf7SJacob Keller vf->vf_id); 6670deb0bf7SJacob Keller 6680deb0bf7SJacob Keller mutex_unlock(&vf->cfg_lock); 6690deb0bf7SJacob Keller } 6700deb0bf7SJacob Keller 6710deb0bf7SJacob Keller if (ice_sriov_free_msix_res(pf)) 6720deb0bf7SJacob Keller dev_err(dev, 
"Failed to free MSIX resources used by SR-IOV\n"); 6730deb0bf7SJacob Keller 6740deb0bf7SJacob Keller vfs->num_qps_per = 0; 6750deb0bf7SJacob Keller ice_free_vf_entries(pf); 6760deb0bf7SJacob Keller 6770deb0bf7SJacob Keller mutex_unlock(&vfs->table_lock); 6780deb0bf7SJacob Keller 6790deb0bf7SJacob Keller clear_bit(ICE_VF_DIS, pf->state); 6800deb0bf7SJacob Keller clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); 6810deb0bf7SJacob Keller } 6820deb0bf7SJacob Keller 6830deb0bf7SJacob Keller /** 6840deb0bf7SJacob Keller * ice_trigger_vf_reset - Reset a VF on HW 6850deb0bf7SJacob Keller * @vf: pointer to the VF structure 6860deb0bf7SJacob Keller * @is_vflr: true if VFLR was issued, false if not 6870deb0bf7SJacob Keller * @is_pfr: true if the reset was triggered due to a previous PFR 6880deb0bf7SJacob Keller * 6890deb0bf7SJacob Keller * Trigger hardware to start a reset for a particular VF. Expects the caller 6900deb0bf7SJacob Keller * to wait the proper amount of time to allow hardware to reset the VF before 6910deb0bf7SJacob Keller * it cleans up and restores VF functionality. 6920deb0bf7SJacob Keller */ 6930deb0bf7SJacob Keller static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) 6940deb0bf7SJacob Keller { 6950deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 6960deb0bf7SJacob Keller u32 reg, reg_idx, bit_idx; 6970deb0bf7SJacob Keller unsigned int vf_abs_id, i; 6980deb0bf7SJacob Keller struct device *dev; 6990deb0bf7SJacob Keller struct ice_hw *hw; 7000deb0bf7SJacob Keller 7010deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 7020deb0bf7SJacob Keller hw = &pf->hw; 7030deb0bf7SJacob Keller vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; 7040deb0bf7SJacob Keller 7050deb0bf7SJacob Keller /* Inform VF that it is no longer active, as a warning */ 7060deb0bf7SJacob Keller clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); 7070deb0bf7SJacob Keller 7080deb0bf7SJacob Keller /* Disable VF's configuration API during reset. 
The flag is re-enabled 7090deb0bf7SJacob Keller * when it's safe again to access VF's VSI. 7100deb0bf7SJacob Keller */ 7110deb0bf7SJacob Keller clear_bit(ICE_VF_STATE_INIT, vf->vf_states); 7120deb0bf7SJacob Keller 7130deb0bf7SJacob Keller /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver 7140deb0bf7SJacob Keller * needs to clear them in the case of VFR/VFLR. If this is done for 7150deb0bf7SJacob Keller * PFR, it can mess up VF resets because the VF driver may already 7160deb0bf7SJacob Keller * have started cleanup by the time we get here. 7170deb0bf7SJacob Keller */ 7180deb0bf7SJacob Keller if (!is_pfr) { 7190deb0bf7SJacob Keller wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0); 7200deb0bf7SJacob Keller wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0); 7210deb0bf7SJacob Keller } 7220deb0bf7SJacob Keller 7230deb0bf7SJacob Keller /* In the case of a VFLR, the HW has already reset the VF and we 7240deb0bf7SJacob Keller * just need to clean up, so don't hit the VFRTRIG register. 7250deb0bf7SJacob Keller */ 7260deb0bf7SJacob Keller if (!is_vflr) { 7270deb0bf7SJacob Keller /* reset VF using VPGEN_VFRTRIG reg */ 7280deb0bf7SJacob Keller reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); 7290deb0bf7SJacob Keller reg |= VPGEN_VFRTRIG_VFSWR_M; 7300deb0bf7SJacob Keller wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); 7310deb0bf7SJacob Keller } 7320deb0bf7SJacob Keller /* clear the VFLR bit in GLGEN_VFLRSTAT */ 7330deb0bf7SJacob Keller reg_idx = (vf_abs_id) / 32; 7340deb0bf7SJacob Keller bit_idx = (vf_abs_id) % 32; 7350deb0bf7SJacob Keller wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); 7360deb0bf7SJacob Keller ice_flush(hw); 7370deb0bf7SJacob Keller 7380deb0bf7SJacob Keller wr32(hw, PF_PCI_CIAA, 7390deb0bf7SJacob Keller VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); 7400deb0bf7SJacob Keller for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { 7410deb0bf7SJacob Keller reg = rd32(hw, PF_PCI_CIAD); 7420deb0bf7SJacob Keller /* no transactions pending so stop polling */ 7430deb0bf7SJacob Keller 
if ((reg & VF_TRANS_PENDING_M) == 0) 7440deb0bf7SJacob Keller break; 7450deb0bf7SJacob Keller 7460deb0bf7SJacob Keller dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id); 7470deb0bf7SJacob Keller udelay(ICE_PCI_CIAD_WAIT_DELAY_US); 7480deb0bf7SJacob Keller } 7490deb0bf7SJacob Keller } 7500deb0bf7SJacob Keller 7510deb0bf7SJacob Keller /** 7520deb0bf7SJacob Keller * ice_vf_get_port_info - Get the VF's port info structure 7530deb0bf7SJacob Keller * @vf: VF used to get the port info structure for 7540deb0bf7SJacob Keller */ 7550deb0bf7SJacob Keller static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) 7560deb0bf7SJacob Keller { 7570deb0bf7SJacob Keller return vf->pf->hw.port_info; 7580deb0bf7SJacob Keller } 7590deb0bf7SJacob Keller 7600deb0bf7SJacob Keller /** 7610deb0bf7SJacob Keller * ice_vf_vsi_setup - Set up a VF VSI 7620deb0bf7SJacob Keller * @vf: VF to setup VSI for 7630deb0bf7SJacob Keller * 7640deb0bf7SJacob Keller * Returns pointer to the successfully allocated VSI struct on success, 7650deb0bf7SJacob Keller * otherwise returns NULL on failure. 
7660deb0bf7SJacob Keller */ 7670deb0bf7SJacob Keller static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) 7680deb0bf7SJacob Keller { 7690deb0bf7SJacob Keller struct ice_port_info *pi = ice_vf_get_port_info(vf); 7700deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 7710deb0bf7SJacob Keller struct ice_vsi *vsi; 7720deb0bf7SJacob Keller 7730deb0bf7SJacob Keller vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); 7740deb0bf7SJacob Keller 7750deb0bf7SJacob Keller if (!vsi) { 7760deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); 7770deb0bf7SJacob Keller ice_vf_invalidate_vsi(vf); 7780deb0bf7SJacob Keller return NULL; 7790deb0bf7SJacob Keller } 7800deb0bf7SJacob Keller 7810deb0bf7SJacob Keller vf->lan_vsi_idx = vsi->idx; 7820deb0bf7SJacob Keller vf->lan_vsi_num = vsi->vsi_num; 7830deb0bf7SJacob Keller 7840deb0bf7SJacob Keller return vsi; 7850deb0bf7SJacob Keller } 7860deb0bf7SJacob Keller 7870deb0bf7SJacob Keller /** 7880deb0bf7SJacob Keller * ice_vf_ctrl_vsi_setup - Set up a VF control VSI 7890deb0bf7SJacob Keller * @vf: VF to setup control VSI for 7900deb0bf7SJacob Keller * 7910deb0bf7SJacob Keller * Returns pointer to the successfully allocated VSI struct on success, 7920deb0bf7SJacob Keller * otherwise returns NULL on failure. 
7930deb0bf7SJacob Keller */ 7940deb0bf7SJacob Keller struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) 7950deb0bf7SJacob Keller { 7960deb0bf7SJacob Keller struct ice_port_info *pi = ice_vf_get_port_info(vf); 7970deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 7980deb0bf7SJacob Keller struct ice_vsi *vsi; 7990deb0bf7SJacob Keller 8000deb0bf7SJacob Keller vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL); 8010deb0bf7SJacob Keller if (!vsi) { 8020deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); 8030deb0bf7SJacob Keller ice_vf_ctrl_invalidate_vsi(vf); 8040deb0bf7SJacob Keller } 8050deb0bf7SJacob Keller 8060deb0bf7SJacob Keller return vsi; 8070deb0bf7SJacob Keller } 8080deb0bf7SJacob Keller 8090deb0bf7SJacob Keller /** 8100deb0bf7SJacob Keller * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space 8110deb0bf7SJacob Keller * @pf: pointer to PF structure 8120deb0bf7SJacob Keller * @vf: pointer to VF that the first MSIX vector index is being calculated for 8130deb0bf7SJacob Keller * 8140deb0bf7SJacob Keller * This returns the first MSIX vector index in PF space that is used by this VF. 8150deb0bf7SJacob Keller * This index is used when accessing PF relative registers such as 8160deb0bf7SJacob Keller * GLINT_VECT2FUNC and GLINT_DYN_CTL. 8170deb0bf7SJacob Keller * This will always be the OICR index in the AVF driver so any functionality 8180deb0bf7SJacob Keller * using vf->first_vector_idx for queue configuration will have to increment by 8190deb0bf7SJacob Keller * 1 to avoid meddling with the OICR index. 
8200deb0bf7SJacob Keller */ 8210deb0bf7SJacob Keller static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) 8220deb0bf7SJacob Keller { 8230deb0bf7SJacob Keller return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; 8240deb0bf7SJacob Keller } 8250deb0bf7SJacob Keller 8260deb0bf7SJacob Keller /** 8270deb0bf7SJacob Keller * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration 8280deb0bf7SJacob Keller * @vf: VF to re-apply the configuration for 8290deb0bf7SJacob Keller * 8300deb0bf7SJacob Keller * Called after a VF VSI has been re-added/rebuild during reset. The PF driver 8310deb0bf7SJacob Keller * needs to re-apply the host configured Tx rate limiting configuration. 8320deb0bf7SJacob Keller */ 8330deb0bf7SJacob Keller static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) 8340deb0bf7SJacob Keller { 8350deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 8360deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 8370deb0bf7SJacob Keller int err; 8380deb0bf7SJacob Keller 8390deb0bf7SJacob Keller if (vf->min_tx_rate) { 8400deb0bf7SJacob Keller err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); 8410deb0bf7SJacob Keller if (err) { 8420deb0bf7SJacob Keller dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", 8430deb0bf7SJacob Keller vf->min_tx_rate, vf->vf_id, err); 8440deb0bf7SJacob Keller return err; 8450deb0bf7SJacob Keller } 8460deb0bf7SJacob Keller } 8470deb0bf7SJacob Keller 8480deb0bf7SJacob Keller if (vf->max_tx_rate) { 8490deb0bf7SJacob Keller err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); 8500deb0bf7SJacob Keller if (err) { 8510deb0bf7SJacob Keller dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", 8520deb0bf7SJacob Keller vf->max_tx_rate, vf->vf_id, err); 8530deb0bf7SJacob Keller return err; 8540deb0bf7SJacob Keller } 8550deb0bf7SJacob Keller } 8560deb0bf7SJacob Keller 8570deb0bf7SJacob Keller return 0; 
8580deb0bf7SJacob Keller } 8590deb0bf7SJacob Keller 8600deb0bf7SJacob Keller static u16 ice_vf_get_port_vlan_id(struct ice_vf *vf) 8610deb0bf7SJacob Keller { 8620deb0bf7SJacob Keller return vf->port_vlan_info.vid; 8630deb0bf7SJacob Keller } 8640deb0bf7SJacob Keller 8650deb0bf7SJacob Keller static u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf) 8660deb0bf7SJacob Keller { 8670deb0bf7SJacob Keller return vf->port_vlan_info.prio; 8680deb0bf7SJacob Keller } 8690deb0bf7SJacob Keller 8700deb0bf7SJacob Keller bool ice_vf_is_port_vlan_ena(struct ice_vf *vf) 8710deb0bf7SJacob Keller { 8720deb0bf7SJacob Keller return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf)); 8730deb0bf7SJacob Keller } 8740deb0bf7SJacob Keller 8750deb0bf7SJacob Keller static u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) 8760deb0bf7SJacob Keller { 8770deb0bf7SJacob Keller return vf->port_vlan_info.tpid; 8780deb0bf7SJacob Keller } 8790deb0bf7SJacob Keller 8800deb0bf7SJacob Keller /** 8810deb0bf7SJacob Keller * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN 8820deb0bf7SJacob Keller * @vf: VF to add MAC filters for 8830deb0bf7SJacob Keller * @vsi: Pointer to VSI 8840deb0bf7SJacob Keller * 8850deb0bf7SJacob Keller * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 8860deb0bf7SJacob Keller * always re-adds either a VLAN 0 or port VLAN based filter after reset. 
8870deb0bf7SJacob Keller */ 8880deb0bf7SJacob Keller static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 8890deb0bf7SJacob Keller { 8900deb0bf7SJacob Keller struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 8910deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 8920deb0bf7SJacob Keller int err; 8930deb0bf7SJacob Keller 8940deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) { 8950deb0bf7SJacob Keller err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); 8960deb0bf7SJacob Keller if (err) { 8970deb0bf7SJacob Keller dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", 8980deb0bf7SJacob Keller vf->vf_id, err); 8990deb0bf7SJacob Keller return err; 9000deb0bf7SJacob Keller } 9010deb0bf7SJacob Keller 9020deb0bf7SJacob Keller err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); 9030deb0bf7SJacob Keller } else { 9040deb0bf7SJacob Keller err = ice_vsi_add_vlan_zero(vsi); 9050deb0bf7SJacob Keller } 9060deb0bf7SJacob Keller 9070deb0bf7SJacob Keller if (err) { 9080deb0bf7SJacob Keller dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", 9090deb0bf7SJacob Keller ice_vf_is_port_vlan_ena(vf) ? 
9100deb0bf7SJacob Keller ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); 9110deb0bf7SJacob Keller return err; 9120deb0bf7SJacob Keller } 9130deb0bf7SJacob Keller 9140deb0bf7SJacob Keller err = vlan_ops->ena_rx_filtering(vsi); 9150deb0bf7SJacob Keller if (err) 9160deb0bf7SJacob Keller dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", 9170deb0bf7SJacob Keller vf->vf_id, vsi->idx, err); 9180deb0bf7SJacob Keller 9190deb0bf7SJacob Keller return 0; 9200deb0bf7SJacob Keller } 9210deb0bf7SJacob Keller 9220deb0bf7SJacob Keller static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) 9230deb0bf7SJacob Keller { 9240deb0bf7SJacob Keller struct ice_vsi_ctx *ctx; 9250deb0bf7SJacob Keller int err; 9260deb0bf7SJacob Keller 9270deb0bf7SJacob Keller ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 9280deb0bf7SJacob Keller if (!ctx) 9290deb0bf7SJacob Keller return -ENOMEM; 9300deb0bf7SJacob Keller 9310deb0bf7SJacob Keller ctx->info.sec_flags = vsi->info.sec_flags; 9320deb0bf7SJacob Keller ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 9330deb0bf7SJacob Keller 9340deb0bf7SJacob Keller if (enable) 9350deb0bf7SJacob Keller ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 9360deb0bf7SJacob Keller else 9370deb0bf7SJacob Keller ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 9380deb0bf7SJacob Keller 9390deb0bf7SJacob Keller err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL); 9400deb0bf7SJacob Keller if (err) 9410deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n", 9420deb0bf7SJacob Keller enable ? 
"ON" : "OFF", vsi->vsi_num, err); 9430deb0bf7SJacob Keller else 9440deb0bf7SJacob Keller vsi->info.sec_flags = ctx->info.sec_flags; 9450deb0bf7SJacob Keller 9460deb0bf7SJacob Keller kfree(ctx); 9470deb0bf7SJacob Keller 9480deb0bf7SJacob Keller return err; 9490deb0bf7SJacob Keller } 9500deb0bf7SJacob Keller 9510deb0bf7SJacob Keller /** 9520deb0bf7SJacob Keller * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI 9530deb0bf7SJacob Keller * @vsi: VSI to enable Tx spoof checking for 9540deb0bf7SJacob Keller */ 9550deb0bf7SJacob Keller static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi) 9560deb0bf7SJacob Keller { 9570deb0bf7SJacob Keller struct ice_vsi_vlan_ops *vlan_ops; 9580deb0bf7SJacob Keller int err; 9590deb0bf7SJacob Keller 9600deb0bf7SJacob Keller vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 9610deb0bf7SJacob Keller 9620deb0bf7SJacob Keller err = vlan_ops->ena_tx_filtering(vsi); 9630deb0bf7SJacob Keller if (err) 9640deb0bf7SJacob Keller return err; 9650deb0bf7SJacob Keller 9660deb0bf7SJacob Keller return ice_cfg_mac_antispoof(vsi, true); 9670deb0bf7SJacob Keller } 9680deb0bf7SJacob Keller 9690deb0bf7SJacob Keller /** 9700deb0bf7SJacob Keller * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI 9710deb0bf7SJacob Keller * @vsi: VSI to disable Tx spoof checking for 9720deb0bf7SJacob Keller */ 9730deb0bf7SJacob Keller static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi) 9740deb0bf7SJacob Keller { 9750deb0bf7SJacob Keller struct ice_vsi_vlan_ops *vlan_ops; 9760deb0bf7SJacob Keller int err; 9770deb0bf7SJacob Keller 9780deb0bf7SJacob Keller vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 9790deb0bf7SJacob Keller 9800deb0bf7SJacob Keller err = vlan_ops->dis_tx_filtering(vsi); 9810deb0bf7SJacob Keller if (err) 9820deb0bf7SJacob Keller return err; 9830deb0bf7SJacob Keller 9840deb0bf7SJacob Keller return ice_cfg_mac_antispoof(vsi, false); 9850deb0bf7SJacob Keller } 9860deb0bf7SJacob Keller 9870deb0bf7SJacob Keller /** 9880deb0bf7SJacob Keller * 
ice_vf_set_spoofchk_cfg - apply Tx spoof checking setting 9890deb0bf7SJacob Keller * @vf: VF set spoofchk for 9900deb0bf7SJacob Keller * @vsi: VSI associated to the VF 9910deb0bf7SJacob Keller */ 9920deb0bf7SJacob Keller static int 9930deb0bf7SJacob Keller ice_vf_set_spoofchk_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 9940deb0bf7SJacob Keller { 9950deb0bf7SJacob Keller int err; 9960deb0bf7SJacob Keller 9970deb0bf7SJacob Keller if (vf->spoofchk) 9980deb0bf7SJacob Keller err = ice_vsi_ena_spoofchk(vsi); 9990deb0bf7SJacob Keller else 10000deb0bf7SJacob Keller err = ice_vsi_dis_spoofchk(vsi); 10010deb0bf7SJacob Keller 10020deb0bf7SJacob Keller return err; 10030deb0bf7SJacob Keller } 10040deb0bf7SJacob Keller 10050deb0bf7SJacob Keller /** 10060deb0bf7SJacob Keller * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA 10070deb0bf7SJacob Keller * @vf: VF to add MAC filters for 10080deb0bf7SJacob Keller * 10090deb0bf7SJacob Keller * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 10100deb0bf7SJacob Keller * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	/* in switchdev mode the host does not own the VF's filters */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	/* only re-add the unicast filter if a valid address was configured */
	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr.addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* resync the device address with the hardware address */
		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	/* PF-relative vector range used for GLINT_* programming below */
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	/* device-global vector range used for VPINT_* programming below */
	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* program the VF's allowed vector range (device-based indices) */
	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void
ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to
account for the OICR being the first MSIX */ 11900deb0bf7SJacob Keller return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + 11910deb0bf7SJacob Keller q_vector->v_idx + 1; 11920deb0bf7SJacob Keller } 11930deb0bf7SJacob Keller 11940deb0bf7SJacob Keller /** 11950deb0bf7SJacob Keller * ice_get_max_valid_res_idx - Get the max valid resource index 11960deb0bf7SJacob Keller * @res: pointer to the resource to find the max valid index for 11970deb0bf7SJacob Keller * 11980deb0bf7SJacob Keller * Start from the end of the ice_res_tracker and return right when we find the 11990deb0bf7SJacob Keller * first res->list entry with the ICE_RES_VALID_BIT set. This function is only 12000deb0bf7SJacob Keller * valid for SR-IOV because it is the only consumer that manipulates the 12010deb0bf7SJacob Keller * res->end and this is always called when res->end is set to res->num_entries. 12020deb0bf7SJacob Keller */ 12030deb0bf7SJacob Keller static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) 12040deb0bf7SJacob Keller { 12050deb0bf7SJacob Keller int i; 12060deb0bf7SJacob Keller 12070deb0bf7SJacob Keller if (!res) 12080deb0bf7SJacob Keller return -EINVAL; 12090deb0bf7SJacob Keller 12100deb0bf7SJacob Keller for (i = res->num_entries - 1; i >= 0; i--) 12110deb0bf7SJacob Keller if (res->list[i] & ICE_RES_VALID_BIT) 12120deb0bf7SJacob Keller return i; 12130deb0bf7SJacob Keller 12140deb0bf7SJacob Keller return 0; 12150deb0bf7SJacob Keller } 12160deb0bf7SJacob Keller 12170deb0bf7SJacob Keller /** 12180deb0bf7SJacob Keller * ice_sriov_set_msix_res - Set any used MSIX resources 12190deb0bf7SJacob Keller * @pf: pointer to PF structure 12200deb0bf7SJacob Keller * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs 12210deb0bf7SJacob Keller * 12220deb0bf7SJacob Keller * This function allows SR-IOV resources to be taken from the end of the PF's 12230deb0bf7SJacob Keller * allowed HW MSIX vectors so that the irq_tracker will not be affected. 
We 12240deb0bf7SJacob Keller * just set the pf->sriov_base_vector and return success. 12250deb0bf7SJacob Keller * 12260deb0bf7SJacob Keller * If there are not enough resources available, return an error. This should 12270deb0bf7SJacob Keller * always be caught by ice_set_per_vf_res(). 12280deb0bf7SJacob Keller * 12290deb0bf7SJacob Keller * Return 0 on success, and -EINVAL when there are not enough MSIX vectors 12300deb0bf7SJacob Keller * in the PF's space available for SR-IOV. 12310deb0bf7SJacob Keller */ 12320deb0bf7SJacob Keller static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) 12330deb0bf7SJacob Keller { 12340deb0bf7SJacob Keller u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 12350deb0bf7SJacob Keller int vectors_used = pf->irq_tracker->num_entries; 12360deb0bf7SJacob Keller int sriov_base_vector; 12370deb0bf7SJacob Keller 12380deb0bf7SJacob Keller sriov_base_vector = total_vectors - num_msix_needed; 12390deb0bf7SJacob Keller 12400deb0bf7SJacob Keller /* make sure we only grab irq_tracker entries from the list end and 12410deb0bf7SJacob Keller * that we have enough available MSIX vectors 12420deb0bf7SJacob Keller */ 12430deb0bf7SJacob Keller if (sriov_base_vector < vectors_used) 12440deb0bf7SJacob Keller return -EINVAL; 12450deb0bf7SJacob Keller 12460deb0bf7SJacob Keller pf->sriov_base_vector = sriov_base_vector; 12470deb0bf7SJacob Keller 12480deb0bf7SJacob Keller return 0; 12490deb0bf7SJacob Keller } 12500deb0bf7SJacob Keller 12510deb0bf7SJacob Keller /** 12520deb0bf7SJacob Keller * ice_set_per_vf_res - check if vectors and queues are available 12530deb0bf7SJacob Keller * @pf: pointer to the PF structure 12540deb0bf7SJacob Keller * @num_vfs: the number of SR-IOV VFs being configured 12550deb0bf7SJacob Keller * 12560deb0bf7SJacob Keller * First, determine HW interrupts from common pool. If we allocate fewer VFs, we 12570deb0bf7SJacob Keller * get more vectors and can enable more queues per VF. 
Note that this does not 12580deb0bf7SJacob Keller * grab any vectors from the SW pool already allocated. Also note, that all 12590deb0bf7SJacob Keller * vector counts include one for each VF's miscellaneous interrupt vector 12600deb0bf7SJacob Keller * (i.e. OICR). 12610deb0bf7SJacob Keller * 12620deb0bf7SJacob Keller * Minimum VFs - 2 vectors, 1 queue pair 12630deb0bf7SJacob Keller * Small VFs - 5 vectors, 4 queue pairs 12640deb0bf7SJacob Keller * Medium VFs - 17 vectors, 16 queue pairs 12650deb0bf7SJacob Keller * 12660deb0bf7SJacob Keller * Second, determine number of queue pairs per VF by starting with a pre-defined 12670deb0bf7SJacob Keller * maximum each VF supports. If this is not possible, then we adjust based on 12680deb0bf7SJacob Keller * queue pairs available on the device. 12690deb0bf7SJacob Keller * 12700deb0bf7SJacob Keller * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used 12710deb0bf7SJacob Keller * by each VF during VF initialization and reset. 
12720deb0bf7SJacob Keller */ 12730deb0bf7SJacob Keller static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) 12740deb0bf7SJacob Keller { 12750deb0bf7SJacob Keller int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); 12760deb0bf7SJacob Keller u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; 12770deb0bf7SJacob Keller int msix_avail_per_vf, msix_avail_for_sriov; 12780deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 12790deb0bf7SJacob Keller 12800deb0bf7SJacob Keller lockdep_assert_held(&pf->vfs.table_lock); 12810deb0bf7SJacob Keller 12820deb0bf7SJacob Keller if (!num_vfs || max_valid_res_idx < 0) 12830deb0bf7SJacob Keller return -EINVAL; 12840deb0bf7SJacob Keller 12850deb0bf7SJacob Keller /* determine MSI-X resources per VF */ 12860deb0bf7SJacob Keller msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - 12870deb0bf7SJacob Keller pf->irq_tracker->num_entries; 12880deb0bf7SJacob Keller msix_avail_per_vf = msix_avail_for_sriov / num_vfs; 12890deb0bf7SJacob Keller if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { 12900deb0bf7SJacob Keller num_msix_per_vf = ICE_NUM_VF_MSIX_MED; 12910deb0bf7SJacob Keller } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { 12920deb0bf7SJacob Keller num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; 12930deb0bf7SJacob Keller } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { 12940deb0bf7SJacob Keller num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; 12950deb0bf7SJacob Keller } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { 12960deb0bf7SJacob Keller num_msix_per_vf = ICE_MIN_INTR_PER_VF; 12970deb0bf7SJacob Keller } else { 12980deb0bf7SJacob Keller dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. 
Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n", 12990deb0bf7SJacob Keller msix_avail_for_sriov, ICE_MIN_INTR_PER_VF, 13000deb0bf7SJacob Keller num_vfs); 13010deb0bf7SJacob Keller return -EIO; 13020deb0bf7SJacob Keller } 13030deb0bf7SJacob Keller 13040deb0bf7SJacob Keller num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, 13050deb0bf7SJacob Keller ICE_MAX_RSS_QS_PER_VF); 13060deb0bf7SJacob Keller avail_qs = ice_get_avail_txq_count(pf) / num_vfs; 13070deb0bf7SJacob Keller if (!avail_qs) 13080deb0bf7SJacob Keller num_txq = 0; 13090deb0bf7SJacob Keller else if (num_txq > avail_qs) 13100deb0bf7SJacob Keller num_txq = rounddown_pow_of_two(avail_qs); 13110deb0bf7SJacob Keller 13120deb0bf7SJacob Keller num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF, 13130deb0bf7SJacob Keller ICE_MAX_RSS_QS_PER_VF); 13140deb0bf7SJacob Keller avail_qs = ice_get_avail_rxq_count(pf) / num_vfs; 13150deb0bf7SJacob Keller if (!avail_qs) 13160deb0bf7SJacob Keller num_rxq = 0; 13170deb0bf7SJacob Keller else if (num_rxq > avail_qs) 13180deb0bf7SJacob Keller num_rxq = rounddown_pow_of_two(avail_qs); 13190deb0bf7SJacob Keller 13200deb0bf7SJacob Keller if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) { 13210deb0bf7SJacob Keller dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n", 13220deb0bf7SJacob Keller ICE_MIN_QS_PER_VF, num_vfs); 13230deb0bf7SJacob Keller return -EIO; 13240deb0bf7SJacob Keller } 13250deb0bf7SJacob Keller 13260deb0bf7SJacob Keller if (ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs)) { 13270deb0bf7SJacob Keller dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", 13280deb0bf7SJacob Keller num_vfs); 13290deb0bf7SJacob Keller return -EINVAL; 13300deb0bf7SJacob Keller } 13310deb0bf7SJacob Keller 13320deb0bf7SJacob Keller /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ 13330deb0bf7SJacob Keller pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); 13340deb0bf7SJacob Keller pf->vfs.num_msix_per = num_msix_per_vf; 13350deb0bf7SJacob Keller dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", 13360deb0bf7SJacob Keller num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per); 13370deb0bf7SJacob Keller 13380deb0bf7SJacob Keller return 0; 13390deb0bf7SJacob Keller } 13400deb0bf7SJacob Keller 13410deb0bf7SJacob Keller /** 13420deb0bf7SJacob Keller * ice_clear_vf_reset_trigger - enable VF to access hardware 13430deb0bf7SJacob Keller * @vf: VF to enabled hardware access for 13440deb0bf7SJacob Keller */ 13450deb0bf7SJacob Keller static void ice_clear_vf_reset_trigger(struct ice_vf *vf) 13460deb0bf7SJacob Keller { 13470deb0bf7SJacob Keller struct ice_hw *hw = &vf->pf->hw; 13480deb0bf7SJacob Keller u32 reg; 13490deb0bf7SJacob Keller 13500deb0bf7SJacob Keller reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); 13510deb0bf7SJacob Keller reg &= ~VPGEN_VFRTRIG_VFSWR_M; 13520deb0bf7SJacob Keller wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); 13530deb0bf7SJacob Keller ice_flush(hw); 13540deb0bf7SJacob Keller } 13550deb0bf7SJacob Keller 13560deb0bf7SJacob Keller static int 13570deb0bf7SJacob Keller ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 13580deb0bf7SJacob Keller { 13590deb0bf7SJacob Keller struct ice_hw *hw = &vsi->back->hw; 13600deb0bf7SJacob Keller int status; 13610deb0bf7SJacob Keller 13620deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 13630deb0bf7SJacob Keller status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 13640deb0bf7SJacob Keller ice_vf_get_port_vlan_id(vf)); 13650deb0bf7SJacob Keller else if (ice_vsi_has_non_zero_vlans(vsi)) 13660deb0bf7SJacob Keller status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); 13670deb0bf7SJacob Keller else 13680deb0bf7SJacob Keller status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); 13690deb0bf7SJacob Keller 13700deb0bf7SJacob 
Keller if (status && status != -EEXIST) { 13710deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 13720deb0bf7SJacob Keller vf->vf_id, status); 13730deb0bf7SJacob Keller return status; 13740deb0bf7SJacob Keller } 13750deb0bf7SJacob Keller 13760deb0bf7SJacob Keller return 0; 13770deb0bf7SJacob Keller } 13780deb0bf7SJacob Keller 13790deb0bf7SJacob Keller static int 13800deb0bf7SJacob Keller ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) 13810deb0bf7SJacob Keller { 13820deb0bf7SJacob Keller struct ice_hw *hw = &vsi->back->hw; 13830deb0bf7SJacob Keller int status; 13840deb0bf7SJacob Keller 13850deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 13860deb0bf7SJacob Keller status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 13870deb0bf7SJacob Keller ice_vf_get_port_vlan_id(vf)); 13880deb0bf7SJacob Keller else if (ice_vsi_has_non_zero_vlans(vsi)) 13890deb0bf7SJacob Keller status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); 13900deb0bf7SJacob Keller else 13910deb0bf7SJacob Keller status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); 13920deb0bf7SJacob Keller 13930deb0bf7SJacob Keller if (status && status != -ENOENT) { 13940deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", 13950deb0bf7SJacob Keller vf->vf_id, status); 13960deb0bf7SJacob Keller return status; 13970deb0bf7SJacob Keller } 13980deb0bf7SJacob Keller 13990deb0bf7SJacob Keller return 0; 14000deb0bf7SJacob Keller } 14010deb0bf7SJacob Keller 14020deb0bf7SJacob Keller static void ice_vf_clear_counters(struct ice_vf *vf) 14030deb0bf7SJacob Keller { 14040deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 14050deb0bf7SJacob Keller 14060deb0bf7SJacob Keller vf->num_mac = 0; 14070deb0bf7SJacob Keller vsi->num_vlan = 0; 14080deb0bf7SJacob Keller memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); 
14090deb0bf7SJacob Keller memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); 14100deb0bf7SJacob Keller } 14110deb0bf7SJacob Keller 14120deb0bf7SJacob Keller /** 14130deb0bf7SJacob Keller * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild 14140deb0bf7SJacob Keller * @vf: VF to perform pre VSI rebuild tasks 14150deb0bf7SJacob Keller * 14160deb0bf7SJacob Keller * These tasks are items that don't need to be amortized since they are most 14170deb0bf7SJacob Keller * likely called in a for loop with all VF(s) in the reset_all_vfs() case. 14180deb0bf7SJacob Keller */ 14190deb0bf7SJacob Keller static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) 14200deb0bf7SJacob Keller { 14210deb0bf7SJacob Keller ice_vf_clear_counters(vf); 14220deb0bf7SJacob Keller ice_clear_vf_reset_trigger(vf); 14230deb0bf7SJacob Keller } 14240deb0bf7SJacob Keller 14250deb0bf7SJacob Keller /** 14260deb0bf7SJacob Keller * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config 14270deb0bf7SJacob Keller * @vsi: Pointer to VSI 14280deb0bf7SJacob Keller * 14290deb0bf7SJacob Keller * This function moves VSI into corresponding scheduler aggregator node 14300deb0bf7SJacob Keller * based on cached value of "aggregator node info" per VSI 14310deb0bf7SJacob Keller */ 14320deb0bf7SJacob Keller static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) 14330deb0bf7SJacob Keller { 14340deb0bf7SJacob Keller struct ice_pf *pf = vsi->back; 14350deb0bf7SJacob Keller struct device *dev; 14360deb0bf7SJacob Keller int status; 14370deb0bf7SJacob Keller 14380deb0bf7SJacob Keller if (!vsi->agg_node) 14390deb0bf7SJacob Keller return; 14400deb0bf7SJacob Keller 14410deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 14420deb0bf7SJacob Keller if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 14430deb0bf7SJacob Keller dev_dbg(dev, 14440deb0bf7SJacob Keller "agg_id %u already has reached max_num_vsis %u\n", 14450deb0bf7SJacob Keller vsi->agg_node->agg_id, vsi->agg_node->num_vsis); 
14460deb0bf7SJacob Keller return; 14470deb0bf7SJacob Keller } 14480deb0bf7SJacob Keller 14490deb0bf7SJacob Keller status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, 14500deb0bf7SJacob Keller vsi->idx, vsi->tc_cfg.ena_tc); 14510deb0bf7SJacob Keller if (status) 14520deb0bf7SJacob Keller dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", 14530deb0bf7SJacob Keller vsi->idx, vsi->agg_node->agg_id); 14540deb0bf7SJacob Keller else 14550deb0bf7SJacob Keller vsi->agg_node->num_vsis++; 14560deb0bf7SJacob Keller } 14570deb0bf7SJacob Keller 14580deb0bf7SJacob Keller /** 14590deb0bf7SJacob Keller * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset 14600deb0bf7SJacob Keller * @vf: VF to rebuild host configuration on 14610deb0bf7SJacob Keller */ 14620deb0bf7SJacob Keller static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) 14630deb0bf7SJacob Keller { 14640deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 14650deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 14660deb0bf7SJacob Keller 14670deb0bf7SJacob Keller ice_vf_set_host_trust_cfg(vf); 14680deb0bf7SJacob Keller 14690deb0bf7SJacob Keller if (ice_vf_rebuild_host_mac_cfg(vf)) 14700deb0bf7SJacob Keller dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", 14710deb0bf7SJacob Keller vf->vf_id); 14720deb0bf7SJacob Keller 14730deb0bf7SJacob Keller if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) 14740deb0bf7SJacob Keller dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", 14750deb0bf7SJacob Keller vf->vf_id); 14760deb0bf7SJacob Keller 14770deb0bf7SJacob Keller if (ice_vf_rebuild_host_tx_rate_cfg(vf)) 14780deb0bf7SJacob Keller dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", 14790deb0bf7SJacob Keller vf->vf_id); 14800deb0bf7SJacob Keller 14810deb0bf7SJacob Keller if (ice_vf_set_spoofchk_cfg(vf, vsi)) 14820deb0bf7SJacob Keller dev_err(dev, "failed to rebuild spoofchk configuration for VF 
%d\n", 14830deb0bf7SJacob Keller vf->vf_id); 14840deb0bf7SJacob Keller 14850deb0bf7SJacob Keller /* rebuild aggregator node config for main VF VSI */ 14860deb0bf7SJacob Keller ice_vf_rebuild_aggregator_node_cfg(vsi); 14870deb0bf7SJacob Keller } 14880deb0bf7SJacob Keller 14890deb0bf7SJacob Keller /** 14900deb0bf7SJacob Keller * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI 14910deb0bf7SJacob Keller * @vf: VF to release and setup the VSI for 14920deb0bf7SJacob Keller * 14930deb0bf7SJacob Keller * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF 14940deb0bf7SJacob Keller * configuration change, etc.). 14950deb0bf7SJacob Keller */ 14960deb0bf7SJacob Keller static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf) 14970deb0bf7SJacob Keller { 14980deb0bf7SJacob Keller ice_vf_vsi_release(vf); 14990deb0bf7SJacob Keller if (!ice_vf_vsi_setup(vf)) 15000deb0bf7SJacob Keller return -ENOMEM; 15010deb0bf7SJacob Keller 15020deb0bf7SJacob Keller return 0; 15030deb0bf7SJacob Keller } 15040deb0bf7SJacob Keller 15050deb0bf7SJacob Keller /** 15060deb0bf7SJacob Keller * ice_vf_rebuild_vsi - rebuild the VF's VSI 15070deb0bf7SJacob Keller * @vf: VF to rebuild the VSI for 15080deb0bf7SJacob Keller * 15090deb0bf7SJacob Keller * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the 15100deb0bf7SJacob Keller * host, PFR, CORER, etc.). 
15110deb0bf7SJacob Keller */ 15120deb0bf7SJacob Keller static int ice_vf_rebuild_vsi(struct ice_vf *vf) 15130deb0bf7SJacob Keller { 15140deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 15150deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 15160deb0bf7SJacob Keller 15170deb0bf7SJacob Keller if (ice_vsi_rebuild(vsi, true)) { 15180deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", 15190deb0bf7SJacob Keller vf->vf_id); 15200deb0bf7SJacob Keller return -EIO; 15210deb0bf7SJacob Keller } 15220deb0bf7SJacob Keller /* vsi->idx will remain the same in this case so don't update 15230deb0bf7SJacob Keller * vf->lan_vsi_idx 15240deb0bf7SJacob Keller */ 15250deb0bf7SJacob Keller vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 15260deb0bf7SJacob Keller vf->lan_vsi_num = vsi->vsi_num; 15270deb0bf7SJacob Keller 15280deb0bf7SJacob Keller return 0; 15290deb0bf7SJacob Keller } 15300deb0bf7SJacob Keller 15310deb0bf7SJacob Keller /** 15320deb0bf7SJacob Keller * ice_vf_set_initialized - VF is ready for VIRTCHNL communication 15330deb0bf7SJacob Keller * @vf: VF to set in initialized state 15340deb0bf7SJacob Keller * 15350deb0bf7SJacob Keller * After this function the VF will be ready to receive/handle the 15360deb0bf7SJacob Keller * VIRTCHNL_OP_GET_VF_RESOURCES message 15370deb0bf7SJacob Keller */ 15380deb0bf7SJacob Keller static void ice_vf_set_initialized(struct ice_vf *vf) 15390deb0bf7SJacob Keller { 15400deb0bf7SJacob Keller ice_set_vf_state_qs_dis(vf); 15410deb0bf7SJacob Keller clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 15420deb0bf7SJacob Keller clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 15430deb0bf7SJacob Keller clear_bit(ICE_VF_STATE_DIS, vf->vf_states); 15440deb0bf7SJacob Keller set_bit(ICE_VF_STATE_INIT, vf->vf_states); 15450deb0bf7SJacob Keller memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); 15460deb0bf7SJacob Keller } 15470deb0bf7SJacob Keller 15480deb0bf7SJacob Keller /** 15490deb0bf7SJacob Keller * 
ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt 15500deb0bf7SJacob Keller * @vf: VF to perform tasks on 15510deb0bf7SJacob Keller */ 15520deb0bf7SJacob Keller static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) 15530deb0bf7SJacob Keller { 15540deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 15550deb0bf7SJacob Keller struct ice_hw *hw; 15560deb0bf7SJacob Keller 15570deb0bf7SJacob Keller hw = &pf->hw; 15580deb0bf7SJacob Keller 15590deb0bf7SJacob Keller ice_vf_rebuild_host_cfg(vf); 15600deb0bf7SJacob Keller 15610deb0bf7SJacob Keller ice_vf_set_initialized(vf); 15620deb0bf7SJacob Keller ice_ena_vf_mappings(vf); 15630deb0bf7SJacob Keller wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); 15640deb0bf7SJacob Keller } 15650deb0bf7SJacob Keller 15660deb0bf7SJacob Keller /** 15670deb0bf7SJacob Keller * ice_reset_all_vfs - reset all allocated VFs in one go 15680deb0bf7SJacob Keller * @pf: pointer to the PF structure 15690deb0bf7SJacob Keller * @is_vflr: true if VFLR was issued, false if not 15700deb0bf7SJacob Keller * 15710deb0bf7SJacob Keller * First, tell the hardware to reset each VF, then do all the waiting in one 15720deb0bf7SJacob Keller * chunk, and finally finish restoring each VF after the wait. This is useful 15730deb0bf7SJacob Keller * during PF routines which need to reset all VFs, as otherwise it must perform 15740deb0bf7SJacob Keller * these resets in a serialized fashion. 15750deb0bf7SJacob Keller * 15760deb0bf7SJacob Keller * Returns true if any VFs were reset, and false otherwise. 
15770deb0bf7SJacob Keller */ 15780deb0bf7SJacob Keller bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) 15790deb0bf7SJacob Keller { 15800deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 15810deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 15820deb0bf7SJacob Keller struct ice_vf *vf; 15830deb0bf7SJacob Keller unsigned int bkt; 15840deb0bf7SJacob Keller 15850deb0bf7SJacob Keller /* If we don't have any VFs, then there is nothing to reset */ 15860deb0bf7SJacob Keller if (!ice_has_vfs(pf)) 15870deb0bf7SJacob Keller return false; 15880deb0bf7SJacob Keller 15890deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 15900deb0bf7SJacob Keller 15910deb0bf7SJacob Keller /* clear all malicious info if the VFs are getting reset */ 15920deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) 15930deb0bf7SJacob Keller if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, 15940deb0bf7SJacob Keller ICE_MAX_VF_COUNT, vf->vf_id)) 15950deb0bf7SJacob Keller dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", 15960deb0bf7SJacob Keller vf->vf_id); 15970deb0bf7SJacob Keller 15980deb0bf7SJacob Keller /* If VFs have been disabled, there is no need to reset */ 15990deb0bf7SJacob Keller if (test_and_set_bit(ICE_VF_DIS, pf->state)) { 16000deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 16010deb0bf7SJacob Keller return false; 16020deb0bf7SJacob Keller } 16030deb0bf7SJacob Keller 16040deb0bf7SJacob Keller /* Begin reset on all VFs at once */ 16050deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) 16060deb0bf7SJacob Keller ice_trigger_vf_reset(vf, is_vflr, true); 16070deb0bf7SJacob Keller 16080deb0bf7SJacob Keller /* HW requires some time to make sure it can flush the FIFO for a VF 16090deb0bf7SJacob Keller * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in 16100deb0bf7SJacob Keller * sequence to make sure that it has completed. 
We'll keep track of 16110deb0bf7SJacob Keller * the VFs using a simple iterator that increments once that VF has 16120deb0bf7SJacob Keller * finished resetting. 16130deb0bf7SJacob Keller */ 16140deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 16150deb0bf7SJacob Keller bool done = false; 16160deb0bf7SJacob Keller unsigned int i; 16170deb0bf7SJacob Keller u32 reg; 16180deb0bf7SJacob Keller 16190deb0bf7SJacob Keller for (i = 0; i < 10; i++) { 16200deb0bf7SJacob Keller reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id)); 16210deb0bf7SJacob Keller if (reg & VPGEN_VFRSTAT_VFRD_M) { 16220deb0bf7SJacob Keller done = true; 16230deb0bf7SJacob Keller break; 16240deb0bf7SJacob Keller } 16250deb0bf7SJacob Keller 16260deb0bf7SJacob Keller /* only delay if check failed */ 16270deb0bf7SJacob Keller usleep_range(10, 20); 16280deb0bf7SJacob Keller } 16290deb0bf7SJacob Keller 16300deb0bf7SJacob Keller if (!done) { 16310deb0bf7SJacob Keller /* Display a warning if at least one VF didn't manage 16320deb0bf7SJacob Keller * to reset in time, but continue on with the 16330deb0bf7SJacob Keller * operation. 16340deb0bf7SJacob Keller */ 16350deb0bf7SJacob Keller dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id); 16360deb0bf7SJacob Keller break; 16370deb0bf7SJacob Keller } 16380deb0bf7SJacob Keller } 16390deb0bf7SJacob Keller 16400deb0bf7SJacob Keller /* free VF resources to begin resetting the VSI state */ 16410deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 16420deb0bf7SJacob Keller mutex_lock(&vf->cfg_lock); 16430deb0bf7SJacob Keller 16440deb0bf7SJacob Keller vf->driver_caps = 0; 16450deb0bf7SJacob Keller ice_vc_set_default_allowlist(vf); 16460deb0bf7SJacob Keller 16470deb0bf7SJacob Keller ice_vf_fdir_exit(vf); 16480deb0bf7SJacob Keller ice_vf_fdir_init(vf); 16490deb0bf7SJacob Keller /* clean VF control VSI when resetting VFs since it should be 16500deb0bf7SJacob Keller * setup only when VF creates its first FDIR rule. 
16510deb0bf7SJacob Keller */ 16520deb0bf7SJacob Keller if (vf->ctrl_vsi_idx != ICE_NO_VSI) 16530deb0bf7SJacob Keller ice_vf_ctrl_invalidate_vsi(vf); 16540deb0bf7SJacob Keller 16550deb0bf7SJacob Keller ice_vf_pre_vsi_rebuild(vf); 16560deb0bf7SJacob Keller ice_vf_rebuild_vsi(vf); 16570deb0bf7SJacob Keller ice_vf_post_vsi_rebuild(vf); 16580deb0bf7SJacob Keller 16590deb0bf7SJacob Keller mutex_unlock(&vf->cfg_lock); 16600deb0bf7SJacob Keller } 16610deb0bf7SJacob Keller 16620deb0bf7SJacob Keller if (ice_is_eswitch_mode_switchdev(pf)) 16630deb0bf7SJacob Keller if (ice_eswitch_rebuild(pf)) 16640deb0bf7SJacob Keller dev_warn(dev, "eswitch rebuild failed\n"); 16650deb0bf7SJacob Keller 16660deb0bf7SJacob Keller ice_flush(hw); 16670deb0bf7SJacob Keller clear_bit(ICE_VF_DIS, pf->state); 16680deb0bf7SJacob Keller 16690deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 16700deb0bf7SJacob Keller 16710deb0bf7SJacob Keller return true; 16720deb0bf7SJacob Keller } 16730deb0bf7SJacob Keller 16740deb0bf7SJacob Keller /** 16750deb0bf7SJacob Keller * ice_is_vf_disabled 16760deb0bf7SJacob Keller * @vf: pointer to the VF info 16770deb0bf7SJacob Keller * 16780deb0bf7SJacob Keller * Returns true if the PF or VF is disabled, false otherwise. 16790deb0bf7SJacob Keller */ 16800deb0bf7SJacob Keller bool ice_is_vf_disabled(struct ice_vf *vf) 16810deb0bf7SJacob Keller { 16820deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 16830deb0bf7SJacob Keller 16840deb0bf7SJacob Keller /* If the PF has been disabled, there is no need resetting VF until 16850deb0bf7SJacob Keller * PF is active again. Similarly, if the VF has been disabled, this 16860deb0bf7SJacob Keller * means something else is resetting the VF, so we shouldn't continue. 16870deb0bf7SJacob Keller * Otherwise, set disable VF state bit for actual reset, and continue. 
16880deb0bf7SJacob Keller */ 16890deb0bf7SJacob Keller return (test_bit(ICE_VF_DIS, pf->state) || 16900deb0bf7SJacob Keller test_bit(ICE_VF_STATE_DIS, vf->vf_states)); 16910deb0bf7SJacob Keller } 16920deb0bf7SJacob Keller 16930deb0bf7SJacob Keller /** 16940deb0bf7SJacob Keller * ice_reset_vf - Reset a particular VF 16950deb0bf7SJacob Keller * @vf: pointer to the VF structure 16960deb0bf7SJacob Keller * @is_vflr: true if VFLR was issued, false if not 16970deb0bf7SJacob Keller * 16980deb0bf7SJacob Keller * Returns true if the VF is currently in reset, resets successfully, or resets 16990deb0bf7SJacob Keller * are disabled and false otherwise. 17000deb0bf7SJacob Keller */ 17010deb0bf7SJacob Keller bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) 17020deb0bf7SJacob Keller { 17030deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 17040deb0bf7SJacob Keller struct ice_vsi *vsi; 17050deb0bf7SJacob Keller struct device *dev; 17060deb0bf7SJacob Keller struct ice_hw *hw; 17070deb0bf7SJacob Keller bool rsd = false; 17080deb0bf7SJacob Keller u8 promisc_m; 17090deb0bf7SJacob Keller u32 reg; 17100deb0bf7SJacob Keller int i; 17110deb0bf7SJacob Keller 17120deb0bf7SJacob Keller lockdep_assert_held(&vf->cfg_lock); 17130deb0bf7SJacob Keller 17140deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 17150deb0bf7SJacob Keller 17160deb0bf7SJacob Keller if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 17170deb0bf7SJacob Keller dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n", 17180deb0bf7SJacob Keller vf->vf_id); 17190deb0bf7SJacob Keller return true; 17200deb0bf7SJacob Keller } 17210deb0bf7SJacob Keller 17220deb0bf7SJacob Keller if (ice_is_vf_disabled(vf)) { 17230deb0bf7SJacob Keller dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", 17240deb0bf7SJacob Keller vf->vf_id); 17250deb0bf7SJacob Keller return true; 17260deb0bf7SJacob Keller } 17270deb0bf7SJacob Keller 17280deb0bf7SJacob Keller /* Set VF disable bit state 
here, before triggering reset */ 17290deb0bf7SJacob Keller set_bit(ICE_VF_STATE_DIS, vf->vf_states); 17300deb0bf7SJacob Keller ice_trigger_vf_reset(vf, is_vflr, false); 17310deb0bf7SJacob Keller 17320deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 17330deb0bf7SJacob Keller 17340deb0bf7SJacob Keller ice_dis_vf_qs(vf); 17350deb0bf7SJacob Keller 17360deb0bf7SJacob Keller /* Call Disable LAN Tx queue AQ whether or not queues are 17370deb0bf7SJacob Keller * enabled. This is needed for successful completion of VFR. 17380deb0bf7SJacob Keller */ 17390deb0bf7SJacob Keller ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, 17400deb0bf7SJacob Keller NULL, ICE_VF_RESET, vf->vf_id, NULL); 17410deb0bf7SJacob Keller 17420deb0bf7SJacob Keller hw = &pf->hw; 17430deb0bf7SJacob Keller /* poll VPGEN_VFRSTAT reg to make sure 17440deb0bf7SJacob Keller * that reset is complete 17450deb0bf7SJacob Keller */ 17460deb0bf7SJacob Keller for (i = 0; i < 10; i++) { 17470deb0bf7SJacob Keller /* VF reset requires driver to first reset the VF and then 17480deb0bf7SJacob Keller * poll the status register to make sure that the reset 17490deb0bf7SJacob Keller * completed successfully. 17500deb0bf7SJacob Keller */ 17510deb0bf7SJacob Keller reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); 17520deb0bf7SJacob Keller if (reg & VPGEN_VFRSTAT_VFRD_M) { 17530deb0bf7SJacob Keller rsd = true; 17540deb0bf7SJacob Keller break; 17550deb0bf7SJacob Keller } 17560deb0bf7SJacob Keller 17570deb0bf7SJacob Keller /* only sleep if the reset is not done */ 17580deb0bf7SJacob Keller usleep_range(10, 20); 17590deb0bf7SJacob Keller } 17600deb0bf7SJacob Keller 17610deb0bf7SJacob Keller vf->driver_caps = 0; 17620deb0bf7SJacob Keller ice_vc_set_default_allowlist(vf); 17630deb0bf7SJacob Keller 17640deb0bf7SJacob Keller /* Display a warning if VF didn't manage to reset in time, but need to 17650deb0bf7SJacob Keller * continue on with the operation. 
17660deb0bf7SJacob Keller */ 17670deb0bf7SJacob Keller if (!rsd) 17680deb0bf7SJacob Keller dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); 17690deb0bf7SJacob Keller 17700deb0bf7SJacob Keller /* disable promiscuous modes in case they were enabled 17710deb0bf7SJacob Keller * ignore any error if disabling process failed 17720deb0bf7SJacob Keller */ 17730deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || 17740deb0bf7SJacob Keller test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { 17750deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan) 17760deb0bf7SJacob Keller promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; 17770deb0bf7SJacob Keller else 17780deb0bf7SJacob Keller promisc_m = ICE_UCAST_PROMISC_BITS; 17790deb0bf7SJacob Keller 17800deb0bf7SJacob Keller if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m)) 17810deb0bf7SJacob Keller dev_err(dev, "disabling promiscuous mode failed\n"); 17820deb0bf7SJacob Keller } 17830deb0bf7SJacob Keller 17840deb0bf7SJacob Keller ice_eswitch_del_vf_mac_rule(vf); 17850deb0bf7SJacob Keller 17860deb0bf7SJacob Keller ice_vf_fdir_exit(vf); 17870deb0bf7SJacob Keller ice_vf_fdir_init(vf); 17880deb0bf7SJacob Keller /* clean VF control VSI when resetting VF since it should be setup 17890deb0bf7SJacob Keller * only when VF creates its first FDIR rule. 
17900deb0bf7SJacob Keller */ 17910deb0bf7SJacob Keller if (vf->ctrl_vsi_idx != ICE_NO_VSI) 17920deb0bf7SJacob Keller ice_vf_ctrl_vsi_release(vf); 17930deb0bf7SJacob Keller 17940deb0bf7SJacob Keller ice_vf_pre_vsi_rebuild(vf); 17950deb0bf7SJacob Keller 17960deb0bf7SJacob Keller if (ice_vf_rebuild_vsi_with_release(vf)) { 17970deb0bf7SJacob Keller dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id); 17980deb0bf7SJacob Keller return false; 17990deb0bf7SJacob Keller } 18000deb0bf7SJacob Keller 18010deb0bf7SJacob Keller ice_vf_post_vsi_rebuild(vf); 18020deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 18030deb0bf7SJacob Keller ice_eswitch_update_repr(vsi); 18040deb0bf7SJacob Keller ice_eswitch_replay_vf_mac_rule(vf); 18050deb0bf7SJacob Keller 18060deb0bf7SJacob Keller /* if the VF has been reset allow it to come up again */ 18070deb0bf7SJacob Keller if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs, 18080deb0bf7SJacob Keller ICE_MAX_VF_COUNT, vf->vf_id)) 18090deb0bf7SJacob Keller dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i); 18100deb0bf7SJacob Keller 18110deb0bf7SJacob Keller return true; 18120deb0bf7SJacob Keller } 18130deb0bf7SJacob Keller 18140deb0bf7SJacob Keller /** 18150deb0bf7SJacob Keller * ice_vc_notify_link_state - Inform all VFs on a PF of link status 18160deb0bf7SJacob Keller * @pf: pointer to the PF structure 18170deb0bf7SJacob Keller */ 18180deb0bf7SJacob Keller void ice_vc_notify_link_state(struct ice_pf *pf) 18190deb0bf7SJacob Keller { 18200deb0bf7SJacob Keller struct ice_vf *vf; 18210deb0bf7SJacob Keller unsigned int bkt; 18220deb0bf7SJacob Keller 18230deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 18240deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) 18250deb0bf7SJacob Keller ice_vc_notify_vf_link_state(vf); 18260deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 18270deb0bf7SJacob Keller } 18280deb0bf7SJacob Keller 18290deb0bf7SJacob Keller /** 18300deb0bf7SJacob Keller * ice_vc_notify_reset - 
 * Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	/* nothing to broadcast if no VFs have been created */
	if (!ice_has_vfs(pf))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 *
 * Sends a VIRTCHNL_EVENT_RESET_IMPENDING event to a single VF over the
 * mailbox, but only if the VF is in a state that can receive it.
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf = vf->pf;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;

	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 *
 * Return: 0 on success, negative errno on failure. On any failure after the
 * VSI is created, the VSI is released before returning (goto cleanup below).
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	/* VLAN 0 filter is required so untagged traffic reaches the VF */
	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
			vf->vf_id, err);
		goto release_vsi;
	}

	err = ice_vf_set_spoofchk_cfg(vf, vsi);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	/* account for the broadcast filter added above */
	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 *
 * For every VF in the table: clear any pending reset trigger, set up its VSI
 * resources, enable its HW mappings, and mark it active via VFGEN_RSTAT.
 * On failure, tears down only the VFs that were already brought up.
 *
 * Return: 0 on success, the failing VF's error code otherwise.
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		ice_clear_vf_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		/* tell the VF (via its reset status register) it may come up */
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	/* undo exactly the first it_cnt VFs; relies on ice_for_each_vf
	 * visiting entries in the same order as the loop above
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		vf->vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
		vf->spoofchk = true;
		vf->num_vf_qs = pf->vfs.num_qps_per;
		ice_vc_set_default_allowlist(vf);

		/* ctrl_vsi_idx will be set to a valid value only when VF
		 * creates its first fdir rule.
		 */
		ice_vf_ctrl_invalidate_vsi(vf);
		ice_vf_fdir_init(vf);

		ice_virtchnl_set_dflt_ops(vf);

		mutex_init(&vf->cfg_lock);

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	/* frees every entry inserted so far, including partial progress */
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 *
 * Masks the OICR interrupt while SR-IOV is brought up (so VFLR interrupts
 * fired by pci_enable_sriov() are not serviced mid-setup), then allocates
 * per-VF resources, creates and starts the VF entries, and finally rearms
 * global interrupts.
 *
 * Return: 0 on success, negative errno on failure (all state unwound).
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	if (ice_set_per_vf_res(pf, num_vfs)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
			num_vfs);
		ret = -ENOSPC;
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	if (ice_start_vfs(pf)) {
		dev_err(dev, "Failed to start VF(s)\n");
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret)
		/* NOTE(review): this jumps past err_unroll_vf_entries, so the
		 * VF entries created/started above are not torn down on this
		 * path — confirm that is intentional
		 */
		goto err_unroll_sriov;

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* changing the VF count requires freeing the old set first;
	 * requesting the count that already exists is a no-op
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enabled SR-IOV on
 *
 * Return: 0 if SR-IOV may be configured, negative errno otherwise.
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		/* freeing is refused while any VF is still assigned to a VM */
		if (!pci_vfs_assigned(pdev)) {
			ice_mbx_deinit_snapshot(&pf->hw);
			ice_free_vfs(pf);
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	/* mailbox snapshot tracks per-VF message rates for malicious-VF
	 * detection; it must exist before any VF can talk to the PF
	 */
	err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
	if (err)
		return err;

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err) {
		ice_mbx_deinit_snapshot(&pf->hw);
		return err;
	}

	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		/* each GLGEN_VFLRSTAT register holds 32 VF status bits,
		 * indexed from the function's absolute VF base ID
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx)) {
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			mutex_lock(&vf->cfg_lock);
			ice_reset_vf(vf, true);
			mutex_unlock(&vf->cfg_lock);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
 * @vf: pointer to the VF info
 */
static void ice_vc_reset_vf(struct ice_vf *vf)
{
	/* notify first so the VF driver can quiesce before the reset hits */
	ice_vc_notify_vf_reset(vf);
	ice_reset_vf(vf, false);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	/* RCU iteration: VF entries may be freed concurrently, hence the
	 * kref_get_unless_zero() before handing a pointer to the caller
	 */
	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		/* NOTE(review): vsi is dereferenced without a NULL check —
		 * confirm ice_get_vf_vsi() cannot return NULL for a VF still
		 * present in the table
		 */

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	mutex_lock(&vf->cfg_lock);
	ice_vc_reset_vf(vf);
	mutex_unlock(&vf->cfg_lock);

	/* drop the reference taken by ice_get_vf_from_pfq() */
	ice_put_vf(vf);
}

/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg
length
 *
 * send msg to VF
 */
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct device *dev;
	struct ice_pf *pf;
	int aq_ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	/* ENOSYS from the mailbox queue is tolerated silently; any other
	 * admin-queue failure is logged and reported to the caller
	 */
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
			 vf->vf_id, aq_ret,
			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
		return -EIO;
	}

	return 0;
}

/**
 * ice_vc_get_ver_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

/**
 * ice_vc_get_max_frame_size - get max frame size allowed for VF
 * @vf: VF used to determine max frame size
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 */
static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	u16 max_frame_size;

	max_frame_size = pi->phy.link_info.max_frame_size;

	/* reserve room for the port VLAN tag the VF cannot see */
	if (ice_vf_is_port_vlan_ena(vf))
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(pf, vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
24540deb0bf7SJacob Keller if (VF_IS_V11(&vf->vf_ver)) 24550deb0bf7SJacob Keller vf->driver_caps = *(u32 *)msg; 24560deb0bf7SJacob Keller else 24570deb0bf7SJacob Keller vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 24580deb0bf7SJacob Keller VIRTCHNL_VF_OFFLOAD_RSS_REG | 24590deb0bf7SJacob Keller VIRTCHNL_VF_OFFLOAD_VLAN; 24600deb0bf7SJacob Keller 24610deb0bf7SJacob Keller vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 24620deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 24630deb0bf7SJacob Keller if (!vsi) { 24640deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 24650deb0bf7SJacob Keller goto err; 24660deb0bf7SJacob Keller } 24670deb0bf7SJacob Keller 24680deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { 24690deb0bf7SJacob Keller /* VLAN offloads based on current device configuration */ 24700deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2; 24710deb0bf7SJacob Keller } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) { 24720deb0bf7SJacob Keller /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for 24730deb0bf7SJacob Keller * these two conditions, which amounts to guest VLAN filtering 24740deb0bf7SJacob Keller * and offloads being based on the inner VLAN or the 24750deb0bf7SJacob Keller * inner/single VLAN respectively and don't allow VF to 24760deb0bf7SJacob Keller * negotiate VIRTCHNL_VF_OFFLOAD in any other cases 24770deb0bf7SJacob Keller */ 24780deb0bf7SJacob Keller if (ice_is_dvm_ena(&pf->hw) && ice_vf_is_port_vlan_ena(vf)) { 24790deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 24800deb0bf7SJacob Keller } else if (!ice_is_dvm_ena(&pf->hw) && 24810deb0bf7SJacob Keller !ice_vf_is_port_vlan_ena(vf)) { 24820deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; 24830deb0bf7SJacob Keller /* configure backward compatible support for VFs that 24840deb0bf7SJacob Keller * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is 24850deb0bf7SJacob Keller * configured in SVM, and no port 
VLAN is configured 24860deb0bf7SJacob Keller */ 24870deb0bf7SJacob Keller ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi); 24880deb0bf7SJacob Keller } else if (ice_is_dvm_ena(&pf->hw)) { 24890deb0bf7SJacob Keller /* configure software offloaded VLAN support when DVM 24900deb0bf7SJacob Keller * is enabled, but no port VLAN is enabled 24910deb0bf7SJacob Keller */ 24920deb0bf7SJacob Keller ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi); 24930deb0bf7SJacob Keller } 24940deb0bf7SJacob Keller } 24950deb0bf7SJacob Keller 24960deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 24970deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 24980deb0bf7SJacob Keller } else { 24990deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) 25000deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 25010deb0bf7SJacob Keller else 25020deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 25030deb0bf7SJacob Keller } 25040deb0bf7SJacob Keller 25050deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) 25060deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; 25070deb0bf7SJacob Keller 25080deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 25090deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 25100deb0bf7SJacob Keller 25110deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 25120deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 25130deb0bf7SJacob Keller 25140deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM) 25150deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 25160deb0bf7SJacob Keller 25170deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) 25180deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 25190deb0bf7SJacob Keller 25200deb0bf7SJacob Keller if (vf->driver_caps & 
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 25210deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 25220deb0bf7SJacob Keller 25230deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 25240deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 25250deb0bf7SJacob Keller 25260deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) 25270deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 25280deb0bf7SJacob Keller 25290deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) 25300deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF; 25310deb0bf7SJacob Keller 25320deb0bf7SJacob Keller if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) 25330deb0bf7SJacob Keller vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO; 25340deb0bf7SJacob Keller 25350deb0bf7SJacob Keller vfres->num_vsis = 1; 25360deb0bf7SJacob Keller /* Tx and Rx queue are equal for VF */ 25370deb0bf7SJacob Keller vfres->num_queue_pairs = vsi->num_txq; 25380deb0bf7SJacob Keller vfres->max_vectors = pf->vfs.num_msix_per; 25390deb0bf7SJacob Keller vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; 25400deb0bf7SJacob Keller vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 25410deb0bf7SJacob Keller vfres->max_mtu = ice_vc_get_max_frame_size(vf); 25420deb0bf7SJacob Keller 25430deb0bf7SJacob Keller vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; 25440deb0bf7SJacob Keller vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 25450deb0bf7SJacob Keller vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; 25460deb0bf7SJacob Keller ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 25470deb0bf7SJacob Keller vf->hw_lan_addr.addr); 25480deb0bf7SJacob Keller 25490deb0bf7SJacob Keller /* match guest capabilities */ 25500deb0bf7SJacob Keller vf->driver_caps = vfres->vf_cap_flags; 25510deb0bf7SJacob Keller 25520deb0bf7SJacob Keller ice_vc_set_caps_allowlist(vf); 25530deb0bf7SJacob Keller 
ice_vc_set_working_allowlist(vf); 25540deb0bf7SJacob Keller 25550deb0bf7SJacob Keller set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); 25560deb0bf7SJacob Keller 25570deb0bf7SJacob Keller err: 25580deb0bf7SJacob Keller /* send the response back to the VF */ 25590deb0bf7SJacob Keller ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, 25600deb0bf7SJacob Keller (u8 *)vfres, len); 25610deb0bf7SJacob Keller 25620deb0bf7SJacob Keller kfree(vfres); 25630deb0bf7SJacob Keller return ret; 25640deb0bf7SJacob Keller } 25650deb0bf7SJacob Keller 25660deb0bf7SJacob Keller /** 25670deb0bf7SJacob Keller * ice_vc_reset_vf_msg 25680deb0bf7SJacob Keller * @vf: pointer to the VF info 25690deb0bf7SJacob Keller * 25700deb0bf7SJacob Keller * called from the VF to reset itself, 25710deb0bf7SJacob Keller * unlike other virtchnl messages, PF driver 25720deb0bf7SJacob Keller * doesn't send the response back to the VF 25730deb0bf7SJacob Keller */ 25740deb0bf7SJacob Keller static void ice_vc_reset_vf_msg(struct ice_vf *vf) 25750deb0bf7SJacob Keller { 25760deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) 25770deb0bf7SJacob Keller ice_reset_vf(vf, false); 25780deb0bf7SJacob Keller } 25790deb0bf7SJacob Keller 25800deb0bf7SJacob Keller /** 25810deb0bf7SJacob Keller * ice_find_vsi_from_id 25820deb0bf7SJacob Keller * @pf: the PF structure to search for the VSI 25830deb0bf7SJacob Keller * @id: ID of the VSI it is searching for 25840deb0bf7SJacob Keller * 25850deb0bf7SJacob Keller * searches for the VSI with the given ID 25860deb0bf7SJacob Keller */ 25870deb0bf7SJacob Keller static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) 25880deb0bf7SJacob Keller { 25890deb0bf7SJacob Keller int i; 25900deb0bf7SJacob Keller 25910deb0bf7SJacob Keller ice_for_each_vsi(pf, i) 25920deb0bf7SJacob Keller if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) 25930deb0bf7SJacob Keller return pf->vsi[i]; 25940deb0bf7SJacob Keller 25950deb0bf7SJacob Keller return NULL; 
25960deb0bf7SJacob Keller } 25970deb0bf7SJacob Keller 25980deb0bf7SJacob Keller /** 25990deb0bf7SJacob Keller * ice_vc_isvalid_vsi_id 26000deb0bf7SJacob Keller * @vf: pointer to the VF info 26010deb0bf7SJacob Keller * @vsi_id: VF relative VSI ID 26020deb0bf7SJacob Keller * 26030deb0bf7SJacob Keller * check for the valid VSI ID 26040deb0bf7SJacob Keller */ 26050deb0bf7SJacob Keller bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) 26060deb0bf7SJacob Keller { 26070deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 26080deb0bf7SJacob Keller struct ice_vsi *vsi; 26090deb0bf7SJacob Keller 26100deb0bf7SJacob Keller vsi = ice_find_vsi_from_id(pf, vsi_id); 26110deb0bf7SJacob Keller 26120deb0bf7SJacob Keller return (vsi && (vsi->vf == vf)); 26130deb0bf7SJacob Keller } 26140deb0bf7SJacob Keller 26150deb0bf7SJacob Keller /** 26160deb0bf7SJacob Keller * ice_vc_isvalid_q_id 26170deb0bf7SJacob Keller * @vf: pointer to the VF info 26180deb0bf7SJacob Keller * @vsi_id: VSI ID 26190deb0bf7SJacob Keller * @qid: VSI relative queue ID 26200deb0bf7SJacob Keller * 26210deb0bf7SJacob Keller * check for the valid queue ID 26220deb0bf7SJacob Keller */ 26230deb0bf7SJacob Keller static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) 26240deb0bf7SJacob Keller { 26250deb0bf7SJacob Keller struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); 26260deb0bf7SJacob Keller /* allocated Tx and Rx queues should be always equal for VF VSI */ 26270deb0bf7SJacob Keller return (vsi && (qid < vsi->alloc_txq)); 26280deb0bf7SJacob Keller } 26290deb0bf7SJacob Keller 26300deb0bf7SJacob Keller /** 26310deb0bf7SJacob Keller * ice_vc_isvalid_ring_len 26320deb0bf7SJacob Keller * @ring_len: length of ring 26330deb0bf7SJacob Keller * 26340deb0bf7SJacob Keller * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE 26350deb0bf7SJacob Keller * or zero 26360deb0bf7SJacob Keller */ 26370deb0bf7SJacob Keller static bool ice_vc_isvalid_ring_len(u16 ring_len) 
26380deb0bf7SJacob Keller { 26390deb0bf7SJacob Keller return ring_len == 0 || 26400deb0bf7SJacob Keller (ring_len >= ICE_MIN_NUM_DESC && 26410deb0bf7SJacob Keller ring_len <= ICE_MAX_NUM_DESC && 26420deb0bf7SJacob Keller !(ring_len % ICE_REQ_DESC_MULTIPLE)); 26430deb0bf7SJacob Keller } 26440deb0bf7SJacob Keller 26450deb0bf7SJacob Keller /** 26460deb0bf7SJacob Keller * ice_vc_validate_pattern 26470deb0bf7SJacob Keller * @vf: pointer to the VF info 26480deb0bf7SJacob Keller * @proto: virtchnl protocol headers 26490deb0bf7SJacob Keller * 26500deb0bf7SJacob Keller * validate the pattern is supported or not. 26510deb0bf7SJacob Keller * 26520deb0bf7SJacob Keller * Return: true on success, false on error. 26530deb0bf7SJacob Keller */ 26540deb0bf7SJacob Keller bool 26550deb0bf7SJacob Keller ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) 26560deb0bf7SJacob Keller { 26570deb0bf7SJacob Keller bool is_ipv4 = false; 26580deb0bf7SJacob Keller bool is_ipv6 = false; 26590deb0bf7SJacob Keller bool is_udp = false; 26600deb0bf7SJacob Keller u16 ptype = -1; 26610deb0bf7SJacob Keller int i = 0; 26620deb0bf7SJacob Keller 26630deb0bf7SJacob Keller while (i < proto->count && 26640deb0bf7SJacob Keller proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { 26650deb0bf7SJacob Keller switch (proto->proto_hdr[i].type) { 26660deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_ETH: 26670deb0bf7SJacob Keller ptype = ICE_PTYPE_MAC_PAY; 26680deb0bf7SJacob Keller break; 26690deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_IPV4: 26700deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV4_PAY; 26710deb0bf7SJacob Keller is_ipv4 = true; 26720deb0bf7SJacob Keller break; 26730deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_IPV6: 26740deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV6_PAY; 26750deb0bf7SJacob Keller is_ipv6 = true; 26760deb0bf7SJacob Keller break; 26770deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_UDP: 26780deb0bf7SJacob Keller if (is_ipv4) 26790deb0bf7SJacob Keller ptype = 
ICE_PTYPE_IPV4_UDP_PAY; 26800deb0bf7SJacob Keller else if (is_ipv6) 26810deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV6_UDP_PAY; 26820deb0bf7SJacob Keller is_udp = true; 26830deb0bf7SJacob Keller break; 26840deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_TCP: 26850deb0bf7SJacob Keller if (is_ipv4) 26860deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV4_TCP_PAY; 26870deb0bf7SJacob Keller else if (is_ipv6) 26880deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV6_TCP_PAY; 26890deb0bf7SJacob Keller break; 26900deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_SCTP: 26910deb0bf7SJacob Keller if (is_ipv4) 26920deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV4_SCTP_PAY; 26930deb0bf7SJacob Keller else if (is_ipv6) 26940deb0bf7SJacob Keller ptype = ICE_PTYPE_IPV6_SCTP_PAY; 26950deb0bf7SJacob Keller break; 26960deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_GTPU_IP: 26970deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_GTPU_EH: 26980deb0bf7SJacob Keller if (is_ipv4) 26990deb0bf7SJacob Keller ptype = ICE_MAC_IPV4_GTPU; 27000deb0bf7SJacob Keller else if (is_ipv6) 27010deb0bf7SJacob Keller ptype = ICE_MAC_IPV6_GTPU; 27020deb0bf7SJacob Keller goto out; 27030deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_L2TPV3: 27040deb0bf7SJacob Keller if (is_ipv4) 27050deb0bf7SJacob Keller ptype = ICE_MAC_IPV4_L2TPV3; 27060deb0bf7SJacob Keller else if (is_ipv6) 27070deb0bf7SJacob Keller ptype = ICE_MAC_IPV6_L2TPV3; 27080deb0bf7SJacob Keller goto out; 27090deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_ESP: 27100deb0bf7SJacob Keller if (is_ipv4) 27110deb0bf7SJacob Keller ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP : 27120deb0bf7SJacob Keller ICE_MAC_IPV4_ESP; 27130deb0bf7SJacob Keller else if (is_ipv6) 27140deb0bf7SJacob Keller ptype = is_udp ? 
ICE_MAC_IPV6_NAT_T_ESP : 27150deb0bf7SJacob Keller ICE_MAC_IPV6_ESP; 27160deb0bf7SJacob Keller goto out; 27170deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_AH: 27180deb0bf7SJacob Keller if (is_ipv4) 27190deb0bf7SJacob Keller ptype = ICE_MAC_IPV4_AH; 27200deb0bf7SJacob Keller else if (is_ipv6) 27210deb0bf7SJacob Keller ptype = ICE_MAC_IPV6_AH; 27220deb0bf7SJacob Keller goto out; 27230deb0bf7SJacob Keller case VIRTCHNL_PROTO_HDR_PFCP: 27240deb0bf7SJacob Keller if (is_ipv4) 27250deb0bf7SJacob Keller ptype = ICE_MAC_IPV4_PFCP_SESSION; 27260deb0bf7SJacob Keller else if (is_ipv6) 27270deb0bf7SJacob Keller ptype = ICE_MAC_IPV6_PFCP_SESSION; 27280deb0bf7SJacob Keller goto out; 27290deb0bf7SJacob Keller default: 27300deb0bf7SJacob Keller break; 27310deb0bf7SJacob Keller } 27320deb0bf7SJacob Keller i++; 27330deb0bf7SJacob Keller } 27340deb0bf7SJacob Keller 27350deb0bf7SJacob Keller out: 27360deb0bf7SJacob Keller return ice_hw_ptype_ena(&vf->pf->hw, ptype); 27370deb0bf7SJacob Keller } 27380deb0bf7SJacob Keller 27390deb0bf7SJacob Keller /** 27400deb0bf7SJacob Keller * ice_vc_parse_rss_cfg - parses hash fields and headers from 27410deb0bf7SJacob Keller * a specific virtchnl RSS cfg 27420deb0bf7SJacob Keller * @hw: pointer to the hardware 27430deb0bf7SJacob Keller * @rss_cfg: pointer to the virtchnl RSS cfg 27440deb0bf7SJacob Keller * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*) 27450deb0bf7SJacob Keller * to configure 27460deb0bf7SJacob Keller * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure 27470deb0bf7SJacob Keller * 27480deb0bf7SJacob Keller * Return true if all the protocol header and hash fields in the RSS cfg could 27490deb0bf7SJacob Keller * be parsed, else return false 27500deb0bf7SJacob Keller * 27510deb0bf7SJacob Keller * This function parses the virtchnl RSS cfg to be the intended 27520deb0bf7SJacob Keller * hash fields and the intended header for RSS configuration 27530deb0bf7SJacob Keller */ 27540deb0bf7SJacob 
Keller static bool 27550deb0bf7SJacob Keller ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, 27560deb0bf7SJacob Keller u32 *addl_hdrs, u64 *hash_flds) 27570deb0bf7SJacob Keller { 27580deb0bf7SJacob Keller const struct ice_vc_hash_field_match_type *hf_list; 27590deb0bf7SJacob Keller const struct ice_vc_hdr_match_type *hdr_list; 27600deb0bf7SJacob Keller int i, hf_list_len, hdr_list_len; 27610deb0bf7SJacob Keller 27620deb0bf7SJacob Keller hf_list = ice_vc_hash_field_list; 27630deb0bf7SJacob Keller hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); 27640deb0bf7SJacob Keller hdr_list = ice_vc_hdr_list; 27650deb0bf7SJacob Keller hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list); 27660deb0bf7SJacob Keller 27670deb0bf7SJacob Keller for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { 27680deb0bf7SJacob Keller struct virtchnl_proto_hdr *proto_hdr = 27690deb0bf7SJacob Keller &rss_cfg->proto_hdrs.proto_hdr[i]; 27700deb0bf7SJacob Keller bool hdr_found = false; 27710deb0bf7SJacob Keller int j; 27720deb0bf7SJacob Keller 27730deb0bf7SJacob Keller /* Find matched ice headers according to virtchnl headers. */ 27740deb0bf7SJacob Keller for (j = 0; j < hdr_list_len; j++) { 27750deb0bf7SJacob Keller struct ice_vc_hdr_match_type hdr_map = hdr_list[j]; 27760deb0bf7SJacob Keller 27770deb0bf7SJacob Keller if (proto_hdr->type == hdr_map.vc_hdr) { 27780deb0bf7SJacob Keller *addl_hdrs |= hdr_map.ice_hdr; 27790deb0bf7SJacob Keller hdr_found = true; 27800deb0bf7SJacob Keller } 27810deb0bf7SJacob Keller } 27820deb0bf7SJacob Keller 27830deb0bf7SJacob Keller if (!hdr_found) 27840deb0bf7SJacob Keller return false; 27850deb0bf7SJacob Keller 27860deb0bf7SJacob Keller /* Find matched ice hash fields according to 27870deb0bf7SJacob Keller * virtchnl hash fields. 
27880deb0bf7SJacob Keller */ 27890deb0bf7SJacob Keller for (j = 0; j < hf_list_len; j++) { 27900deb0bf7SJacob Keller struct ice_vc_hash_field_match_type hf_map = hf_list[j]; 27910deb0bf7SJacob Keller 27920deb0bf7SJacob Keller if (proto_hdr->type == hf_map.vc_hdr && 27930deb0bf7SJacob Keller proto_hdr->field_selector == hf_map.vc_hash_field) { 27940deb0bf7SJacob Keller *hash_flds |= hf_map.ice_hash_field; 27950deb0bf7SJacob Keller break; 27960deb0bf7SJacob Keller } 27970deb0bf7SJacob Keller } 27980deb0bf7SJacob Keller } 27990deb0bf7SJacob Keller 28000deb0bf7SJacob Keller return true; 28010deb0bf7SJacob Keller } 28020deb0bf7SJacob Keller 28030deb0bf7SJacob Keller /** 28040deb0bf7SJacob Keller * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced 28050deb0bf7SJacob Keller * RSS offloads 28060deb0bf7SJacob Keller * @caps: VF driver negotiated capabilities 28070deb0bf7SJacob Keller * 28080deb0bf7SJacob Keller * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set, 28090deb0bf7SJacob Keller * else return false 28100deb0bf7SJacob Keller */ 28110deb0bf7SJacob Keller static bool ice_vf_adv_rss_offload_ena(u32 caps) 28120deb0bf7SJacob Keller { 28130deb0bf7SJacob Keller return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF); 28140deb0bf7SJacob Keller } 28150deb0bf7SJacob Keller 28160deb0bf7SJacob Keller /** 28170deb0bf7SJacob Keller * ice_vc_handle_rss_cfg 28180deb0bf7SJacob Keller * @vf: pointer to the VF info 28190deb0bf7SJacob Keller * @msg: pointer to the message buffer 28200deb0bf7SJacob Keller * @add: add a RSS config if true, otherwise delete a RSS config 28210deb0bf7SJacob Keller * 28220deb0bf7SJacob Keller * This function adds/deletes a RSS config 28230deb0bf7SJacob Keller */ 28240deb0bf7SJacob Keller static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) 28250deb0bf7SJacob Keller { 28260deb0bf7SJacob Keller u32 v_opcode = add ? 
VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG; 28270deb0bf7SJacob Keller struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg; 28280deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 28290deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 28300deb0bf7SJacob Keller struct ice_hw *hw = &vf->pf->hw; 28310deb0bf7SJacob Keller struct ice_vsi *vsi; 28320deb0bf7SJacob Keller 28330deb0bf7SJacob Keller if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 28340deb0bf7SJacob Keller dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", 28350deb0bf7SJacob Keller vf->vf_id); 28360deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 28370deb0bf7SJacob Keller goto error_param; 28380deb0bf7SJacob Keller } 28390deb0bf7SJacob Keller 28400deb0bf7SJacob Keller if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) { 28410deb0bf7SJacob Keller dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n", 28420deb0bf7SJacob Keller vf->vf_id); 28430deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 28440deb0bf7SJacob Keller goto error_param; 28450deb0bf7SJacob Keller } 28460deb0bf7SJacob Keller 28470deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 28480deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 28490deb0bf7SJacob Keller goto error_param; 28500deb0bf7SJacob Keller } 28510deb0bf7SJacob Keller 28520deb0bf7SJacob Keller if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS || 28530deb0bf7SJacob Keller rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC || 28540deb0bf7SJacob Keller rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) { 28550deb0bf7SJacob Keller dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n", 28560deb0bf7SJacob Keller vf->vf_id); 28570deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 28580deb0bf7SJacob Keller goto error_param; 
28590deb0bf7SJacob Keller } 28600deb0bf7SJacob Keller 28610deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 28620deb0bf7SJacob Keller if (!vsi) { 28630deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 28640deb0bf7SJacob Keller goto error_param; 28650deb0bf7SJacob Keller } 28660deb0bf7SJacob Keller 28670deb0bf7SJacob Keller if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { 28680deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 28690deb0bf7SJacob Keller goto error_param; 28700deb0bf7SJacob Keller } 28710deb0bf7SJacob Keller 28720deb0bf7SJacob Keller if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { 28730deb0bf7SJacob Keller struct ice_vsi_ctx *ctx; 28740deb0bf7SJacob Keller u8 lut_type, hash_type; 28750deb0bf7SJacob Keller int status; 28760deb0bf7SJacob Keller 28770deb0bf7SJacob Keller lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 28780deb0bf7SJacob Keller hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR : 28790deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 28800deb0bf7SJacob Keller 28810deb0bf7SJacob Keller ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 28820deb0bf7SJacob Keller if (!ctx) { 28830deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 28840deb0bf7SJacob Keller goto error_param; 28850deb0bf7SJacob Keller } 28860deb0bf7SJacob Keller 28870deb0bf7SJacob Keller ctx->info.q_opt_rss = ((lut_type << 28880deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 28890deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 28900deb0bf7SJacob Keller (hash_type & 28910deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_HASH_M); 28920deb0bf7SJacob Keller 28930deb0bf7SJacob Keller /* Preserve existing queueing option setting */ 28940deb0bf7SJacob Keller ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & 28950deb0bf7SJacob Keller ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); 28960deb0bf7SJacob Keller ctx->info.q_opt_tc = vsi->info.q_opt_tc; 28970deb0bf7SJacob Keller ctx->info.q_opt_flags = vsi->info.q_opt_rss; 28980deb0bf7SJacob Keller 28990deb0bf7SJacob Keller ctx->info.valid_sections 
= 29000deb0bf7SJacob Keller cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 29010deb0bf7SJacob Keller 29020deb0bf7SJacob Keller status = ice_update_vsi(hw, vsi->idx, ctx, NULL); 29030deb0bf7SJacob Keller if (status) { 29040deb0bf7SJacob Keller dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", 29050deb0bf7SJacob Keller status, ice_aq_str(hw->adminq.sq_last_status)); 29060deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29070deb0bf7SJacob Keller } else { 29080deb0bf7SJacob Keller vsi->info.q_opt_rss = ctx->info.q_opt_rss; 29090deb0bf7SJacob Keller } 29100deb0bf7SJacob Keller 29110deb0bf7SJacob Keller kfree(ctx); 29120deb0bf7SJacob Keller } else { 29130deb0bf7SJacob Keller u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; 29140deb0bf7SJacob Keller u64 hash_flds = ICE_HASH_INVALID; 29150deb0bf7SJacob Keller 29160deb0bf7SJacob Keller if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, 29170deb0bf7SJacob Keller &hash_flds)) { 29180deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29190deb0bf7SJacob Keller goto error_param; 29200deb0bf7SJacob Keller } 29210deb0bf7SJacob Keller 29220deb0bf7SJacob Keller if (add) { 29230deb0bf7SJacob Keller if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, 29240deb0bf7SJacob Keller addl_hdrs)) { 29250deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29260deb0bf7SJacob Keller dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", 29270deb0bf7SJacob Keller vsi->vsi_num, v_ret); 29280deb0bf7SJacob Keller } 29290deb0bf7SJacob Keller } else { 29300deb0bf7SJacob Keller int status; 29310deb0bf7SJacob Keller 29320deb0bf7SJacob Keller status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, 29330deb0bf7SJacob Keller addl_hdrs); 29340deb0bf7SJacob Keller /* We just ignore -ENOENT, because if two configurations 29350deb0bf7SJacob Keller * share the same profile remove one of them actually 29360deb0bf7SJacob Keller * removes both, since the profile is deleted. 
29370deb0bf7SJacob Keller */ 29380deb0bf7SJacob Keller if (status && status != -ENOENT) { 29390deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29400deb0bf7SJacob Keller dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", 29410deb0bf7SJacob Keller vf->vf_id, status); 29420deb0bf7SJacob Keller } 29430deb0bf7SJacob Keller } 29440deb0bf7SJacob Keller } 29450deb0bf7SJacob Keller 29460deb0bf7SJacob Keller error_param: 29470deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0); 29480deb0bf7SJacob Keller } 29490deb0bf7SJacob Keller 29500deb0bf7SJacob Keller /** 29510deb0bf7SJacob Keller * ice_vc_config_rss_key 29520deb0bf7SJacob Keller * @vf: pointer to the VF info 29530deb0bf7SJacob Keller * @msg: pointer to the msg buffer 29540deb0bf7SJacob Keller * 29550deb0bf7SJacob Keller * Configure the VF's RSS key 29560deb0bf7SJacob Keller */ 29570deb0bf7SJacob Keller static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) 29580deb0bf7SJacob Keller { 29590deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 29600deb0bf7SJacob Keller struct virtchnl_rss_key *vrk = 29610deb0bf7SJacob Keller (struct virtchnl_rss_key *)msg; 29620deb0bf7SJacob Keller struct ice_vsi *vsi; 29630deb0bf7SJacob Keller 29640deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 29650deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29660deb0bf7SJacob Keller goto error_param; 29670deb0bf7SJacob Keller } 29680deb0bf7SJacob Keller 29690deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { 29700deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29710deb0bf7SJacob Keller goto error_param; 29720deb0bf7SJacob Keller } 29730deb0bf7SJacob Keller 29740deb0bf7SJacob Keller if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { 29750deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29760deb0bf7SJacob Keller goto error_param; 29770deb0bf7SJacob Keller } 29780deb0bf7SJacob Keller 29790deb0bf7SJacob 
Keller if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 29800deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29810deb0bf7SJacob Keller goto error_param; 29820deb0bf7SJacob Keller } 29830deb0bf7SJacob Keller 29840deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 29850deb0bf7SJacob Keller if (!vsi) { 29860deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 29870deb0bf7SJacob Keller goto error_param; 29880deb0bf7SJacob Keller } 29890deb0bf7SJacob Keller 29900deb0bf7SJacob Keller if (ice_set_rss_key(vsi, vrk->key)) 29910deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 29920deb0bf7SJacob Keller error_param: 29930deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret, 29940deb0bf7SJacob Keller NULL, 0); 29950deb0bf7SJacob Keller } 29960deb0bf7SJacob Keller 29970deb0bf7SJacob Keller /** 29980deb0bf7SJacob Keller * ice_vc_config_rss_lut 29990deb0bf7SJacob Keller * @vf: pointer to the VF info 30000deb0bf7SJacob Keller * @msg: pointer to the msg buffer 30010deb0bf7SJacob Keller * 30020deb0bf7SJacob Keller * Configure the VF's RSS LUT 30030deb0bf7SJacob Keller */ 30040deb0bf7SJacob Keller static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) 30050deb0bf7SJacob Keller { 30060deb0bf7SJacob Keller struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 30070deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 30080deb0bf7SJacob Keller struct ice_vsi *vsi; 30090deb0bf7SJacob Keller 30100deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 30110deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 30120deb0bf7SJacob Keller goto error_param; 30130deb0bf7SJacob Keller } 30140deb0bf7SJacob Keller 30150deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { 30160deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 30170deb0bf7SJacob Keller goto error_param; 30180deb0bf7SJacob Keller } 30190deb0bf7SJacob Keller 30200deb0bf7SJacob Keller if 
(vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { 30210deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 30220deb0bf7SJacob Keller goto error_param; 30230deb0bf7SJacob Keller } 30240deb0bf7SJacob Keller 30250deb0bf7SJacob Keller if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 30260deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 30270deb0bf7SJacob Keller goto error_param; 30280deb0bf7SJacob Keller } 30290deb0bf7SJacob Keller 30300deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 30310deb0bf7SJacob Keller if (!vsi) { 30320deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 30330deb0bf7SJacob Keller goto error_param; 30340deb0bf7SJacob Keller } 30350deb0bf7SJacob Keller 30360deb0bf7SJacob Keller if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) 30370deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 30380deb0bf7SJacob Keller error_param: 30390deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, 30400deb0bf7SJacob Keller NULL, 0); 30410deb0bf7SJacob Keller } 30420deb0bf7SJacob Keller 30430deb0bf7SJacob Keller /** 30440deb0bf7SJacob Keller * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset 30450deb0bf7SJacob Keller * @vf: The VF being resseting 30460deb0bf7SJacob Keller * 30470deb0bf7SJacob Keller * The max poll time is about ~800ms, which is about the maximum time it takes 30480deb0bf7SJacob Keller * for a VF to be reset and/or a VF driver to be removed. 
30490deb0bf7SJacob Keller */ 30500deb0bf7SJacob Keller static void ice_wait_on_vf_reset(struct ice_vf *vf) 30510deb0bf7SJacob Keller { 30520deb0bf7SJacob Keller int i; 30530deb0bf7SJacob Keller 30540deb0bf7SJacob Keller for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) { 30550deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) 30560deb0bf7SJacob Keller break; 30570deb0bf7SJacob Keller msleep(ICE_MAX_VF_RESET_SLEEP_MS); 30580deb0bf7SJacob Keller } 30590deb0bf7SJacob Keller } 30600deb0bf7SJacob Keller 30610deb0bf7SJacob Keller /** 30620deb0bf7SJacob Keller * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried 30630deb0bf7SJacob Keller * @vf: VF to check if it's ready to be configured/queried 30640deb0bf7SJacob Keller * 30650deb0bf7SJacob Keller * The purpose of this function is to make sure the VF is not in reset, not 30660deb0bf7SJacob Keller * disabled, and initialized so it can be configured and/or queried by a host 30670deb0bf7SJacob Keller * administrator. 
30680deb0bf7SJacob Keller */ 30690deb0bf7SJacob Keller int ice_check_vf_ready_for_cfg(struct ice_vf *vf) 30700deb0bf7SJacob Keller { 30710deb0bf7SJacob Keller struct ice_pf *pf; 30720deb0bf7SJacob Keller 30730deb0bf7SJacob Keller ice_wait_on_vf_reset(vf); 30740deb0bf7SJacob Keller 30750deb0bf7SJacob Keller if (ice_is_vf_disabled(vf)) 30760deb0bf7SJacob Keller return -EINVAL; 30770deb0bf7SJacob Keller 30780deb0bf7SJacob Keller pf = vf->pf; 30790deb0bf7SJacob Keller if (ice_check_vf_init(pf, vf)) 30800deb0bf7SJacob Keller return -EBUSY; 30810deb0bf7SJacob Keller 30820deb0bf7SJacob Keller return 0; 30830deb0bf7SJacob Keller } 30840deb0bf7SJacob Keller 30850deb0bf7SJacob Keller /** 30860deb0bf7SJacob Keller * ice_set_vf_spoofchk 30870deb0bf7SJacob Keller * @netdev: network interface device structure 30880deb0bf7SJacob Keller * @vf_id: VF identifier 30890deb0bf7SJacob Keller * @ena: flag to enable or disable feature 30900deb0bf7SJacob Keller * 30910deb0bf7SJacob Keller * Enable or disable VF spoof checking 30920deb0bf7SJacob Keller */ 30930deb0bf7SJacob Keller int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) 30940deb0bf7SJacob Keller { 30950deb0bf7SJacob Keller struct ice_netdev_priv *np = netdev_priv(netdev); 30960deb0bf7SJacob Keller struct ice_pf *pf = np->vsi->back; 30970deb0bf7SJacob Keller struct ice_vsi *vf_vsi; 30980deb0bf7SJacob Keller struct device *dev; 30990deb0bf7SJacob Keller struct ice_vf *vf; 31000deb0bf7SJacob Keller int ret; 31010deb0bf7SJacob Keller 31020deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 31030deb0bf7SJacob Keller 31040deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 31050deb0bf7SJacob Keller if (!vf) 31060deb0bf7SJacob Keller return -EINVAL; 31070deb0bf7SJacob Keller 31080deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 31090deb0bf7SJacob Keller if (ret) 31100deb0bf7SJacob Keller goto out_put_vf; 31110deb0bf7SJacob Keller 31120deb0bf7SJacob Keller vf_vsi = ice_get_vf_vsi(vf); 31130deb0bf7SJacob Keller 
if (!vf_vsi) { 31140deb0bf7SJacob Keller netdev_err(netdev, "VSI %d for VF %d is null\n", 31150deb0bf7SJacob Keller vf->lan_vsi_idx, vf->vf_id); 31160deb0bf7SJacob Keller ret = -EINVAL; 31170deb0bf7SJacob Keller goto out_put_vf; 31180deb0bf7SJacob Keller } 31190deb0bf7SJacob Keller 31200deb0bf7SJacob Keller if (vf_vsi->type != ICE_VSI_VF) { 31210deb0bf7SJacob Keller netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", 31220deb0bf7SJacob Keller vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); 31230deb0bf7SJacob Keller ret = -ENODEV; 31240deb0bf7SJacob Keller goto out_put_vf; 31250deb0bf7SJacob Keller } 31260deb0bf7SJacob Keller 31270deb0bf7SJacob Keller if (ena == vf->spoofchk) { 31280deb0bf7SJacob Keller dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); 31290deb0bf7SJacob Keller ret = 0; 31300deb0bf7SJacob Keller goto out_put_vf; 31310deb0bf7SJacob Keller } 31320deb0bf7SJacob Keller 31330deb0bf7SJacob Keller if (ena) 31340deb0bf7SJacob Keller ret = ice_vsi_ena_spoofchk(vf_vsi); 31350deb0bf7SJacob Keller else 31360deb0bf7SJacob Keller ret = ice_vsi_dis_spoofchk(vf_vsi); 31370deb0bf7SJacob Keller if (ret) 31380deb0bf7SJacob Keller dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n", 31390deb0bf7SJacob Keller ena ? 
"ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret); 31400deb0bf7SJacob Keller else 31410deb0bf7SJacob Keller vf->spoofchk = ena; 31420deb0bf7SJacob Keller 31430deb0bf7SJacob Keller out_put_vf: 31440deb0bf7SJacob Keller ice_put_vf(vf); 31450deb0bf7SJacob Keller return ret; 31460deb0bf7SJacob Keller } 31470deb0bf7SJacob Keller 31480deb0bf7SJacob Keller /** 31490deb0bf7SJacob Keller * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode 31500deb0bf7SJacob Keller * @pf: PF structure for accessing VF(s) 31510deb0bf7SJacob Keller * 31520deb0bf7SJacob Keller * Return false if no VF(s) are in unicast and/or multicast promiscuous mode, 31530deb0bf7SJacob Keller * else return true 31540deb0bf7SJacob Keller */ 31550deb0bf7SJacob Keller bool ice_is_any_vf_in_promisc(struct ice_pf *pf) 31560deb0bf7SJacob Keller { 31570deb0bf7SJacob Keller bool is_vf_promisc = false; 31580deb0bf7SJacob Keller struct ice_vf *vf; 31590deb0bf7SJacob Keller unsigned int bkt; 31600deb0bf7SJacob Keller 31610deb0bf7SJacob Keller rcu_read_lock(); 31620deb0bf7SJacob Keller ice_for_each_vf_rcu(pf, bkt, vf) { 31630deb0bf7SJacob Keller /* found a VF that has promiscuous mode configured */ 31640deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || 31650deb0bf7SJacob Keller test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { 31660deb0bf7SJacob Keller is_vf_promisc = true; 31670deb0bf7SJacob Keller break; 31680deb0bf7SJacob Keller } 31690deb0bf7SJacob Keller } 31700deb0bf7SJacob Keller rcu_read_unlock(); 31710deb0bf7SJacob Keller 31720deb0bf7SJacob Keller return is_vf_promisc; 31730deb0bf7SJacob Keller } 31740deb0bf7SJacob Keller 31750deb0bf7SJacob Keller /** 31760deb0bf7SJacob Keller * ice_vc_cfg_promiscuous_mode_msg 31770deb0bf7SJacob Keller * @vf: pointer to the VF info 31780deb0bf7SJacob Keller * @msg: pointer to the msg buffer 31790deb0bf7SJacob Keller * 31800deb0bf7SJacob Keller * called from the VF to configure VF VSIs promiscuous mode 31810deb0bf7SJacob Keller */ 
31820deb0bf7SJacob Keller static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) 31830deb0bf7SJacob Keller { 31840deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 31850deb0bf7SJacob Keller bool rm_promisc, alluni = false, allmulti = false; 31860deb0bf7SJacob Keller struct virtchnl_promisc_info *info = 31870deb0bf7SJacob Keller (struct virtchnl_promisc_info *)msg; 31880deb0bf7SJacob Keller struct ice_vsi_vlan_ops *vlan_ops; 31890deb0bf7SJacob Keller int mcast_err = 0, ucast_err = 0; 31900deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 31910deb0bf7SJacob Keller struct ice_vsi *vsi; 31920deb0bf7SJacob Keller struct device *dev; 31930deb0bf7SJacob Keller int ret = 0; 31940deb0bf7SJacob Keller 31950deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 31960deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 31970deb0bf7SJacob Keller goto error_param; 31980deb0bf7SJacob Keller } 31990deb0bf7SJacob Keller 32000deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) { 32010deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32020deb0bf7SJacob Keller goto error_param; 32030deb0bf7SJacob Keller } 32040deb0bf7SJacob Keller 32050deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 32060deb0bf7SJacob Keller if (!vsi) { 32070deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32080deb0bf7SJacob Keller goto error_param; 32090deb0bf7SJacob Keller } 32100deb0bf7SJacob Keller 32110deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 32120deb0bf7SJacob Keller if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 32130deb0bf7SJacob Keller dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", 32140deb0bf7SJacob Keller vf->vf_id); 32150deb0bf7SJacob Keller /* Leave v_ret alone, lie to the VF on purpose. 
*/ 32160deb0bf7SJacob Keller goto error_param; 32170deb0bf7SJacob Keller } 32180deb0bf7SJacob Keller 32190deb0bf7SJacob Keller if (info->flags & FLAG_VF_UNICAST_PROMISC) 32200deb0bf7SJacob Keller alluni = true; 32210deb0bf7SJacob Keller 32220deb0bf7SJacob Keller if (info->flags & FLAG_VF_MULTICAST_PROMISC) 32230deb0bf7SJacob Keller allmulti = true; 32240deb0bf7SJacob Keller 32250deb0bf7SJacob Keller rm_promisc = !allmulti && !alluni; 32260deb0bf7SJacob Keller 32270deb0bf7SJacob Keller vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 32280deb0bf7SJacob Keller if (rm_promisc) 32290deb0bf7SJacob Keller ret = vlan_ops->ena_rx_filtering(vsi); 32300deb0bf7SJacob Keller else 32310deb0bf7SJacob Keller ret = vlan_ops->dis_rx_filtering(vsi); 32320deb0bf7SJacob Keller if (ret) { 32330deb0bf7SJacob Keller dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n"); 32340deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32350deb0bf7SJacob Keller goto error_param; 32360deb0bf7SJacob Keller } 32370deb0bf7SJacob Keller 32380deb0bf7SJacob Keller if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { 32390deb0bf7SJacob Keller bool set_dflt_vsi = alluni || allmulti; 32400deb0bf7SJacob Keller 32410deb0bf7SJacob Keller if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw)) 32420deb0bf7SJacob Keller /* only attempt to set the default forwarding VSI if 32430deb0bf7SJacob Keller * it's not currently set 32440deb0bf7SJacob Keller */ 32450deb0bf7SJacob Keller ret = ice_set_dflt_vsi(pf->first_sw, vsi); 32460deb0bf7SJacob Keller else if (!set_dflt_vsi && 32470deb0bf7SJacob Keller ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) 32480deb0bf7SJacob Keller /* only attempt to free the default forwarding VSI if we 32490deb0bf7SJacob Keller * are the owner 32500deb0bf7SJacob Keller */ 32510deb0bf7SJacob Keller ret = ice_clear_dflt_vsi(pf->first_sw); 32520deb0bf7SJacob Keller 32530deb0bf7SJacob Keller if (ret) { 32540deb0bf7SJacob Keller dev_err(dev, "%sable VF %d as the default VSI 
failed, error %d\n", 32550deb0bf7SJacob Keller set_dflt_vsi ? "en" : "dis", vf->vf_id, ret); 32560deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 32570deb0bf7SJacob Keller goto error_param; 32580deb0bf7SJacob Keller } 32590deb0bf7SJacob Keller } else { 32600deb0bf7SJacob Keller u8 mcast_m, ucast_m; 32610deb0bf7SJacob Keller 32620deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf) || 32630deb0bf7SJacob Keller ice_vsi_has_non_zero_vlans(vsi)) { 32640deb0bf7SJacob Keller mcast_m = ICE_MCAST_VLAN_PROMISC_BITS; 32650deb0bf7SJacob Keller ucast_m = ICE_UCAST_VLAN_PROMISC_BITS; 32660deb0bf7SJacob Keller } else { 32670deb0bf7SJacob Keller mcast_m = ICE_MCAST_PROMISC_BITS; 32680deb0bf7SJacob Keller ucast_m = ICE_UCAST_PROMISC_BITS; 32690deb0bf7SJacob Keller } 32700deb0bf7SJacob Keller 32710deb0bf7SJacob Keller if (alluni) 32720deb0bf7SJacob Keller ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m); 32730deb0bf7SJacob Keller else 32740deb0bf7SJacob Keller ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); 32750deb0bf7SJacob Keller 32760deb0bf7SJacob Keller if (allmulti) 32770deb0bf7SJacob Keller mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); 32780deb0bf7SJacob Keller else 32790deb0bf7SJacob Keller mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); 32800deb0bf7SJacob Keller 32810deb0bf7SJacob Keller if (ucast_err || mcast_err) 32820deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 32830deb0bf7SJacob Keller } 32840deb0bf7SJacob Keller 32850deb0bf7SJacob Keller if (!mcast_err) { 32860deb0bf7SJacob Keller if (allmulti && 32870deb0bf7SJacob Keller !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) 32880deb0bf7SJacob Keller dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", 32890deb0bf7SJacob Keller vf->vf_id); 32900deb0bf7SJacob Keller else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) 32910deb0bf7SJacob Keller dev_info(dev, "VF %u successfully unset multicast promiscuous 
mode\n", 32920deb0bf7SJacob Keller vf->vf_id); 32930deb0bf7SJacob Keller } 32940deb0bf7SJacob Keller 32950deb0bf7SJacob Keller if (!ucast_err) { 32960deb0bf7SJacob Keller if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) 32970deb0bf7SJacob Keller dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", 32980deb0bf7SJacob Keller vf->vf_id); 32990deb0bf7SJacob Keller else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) 33000deb0bf7SJacob Keller dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", 33010deb0bf7SJacob Keller vf->vf_id); 33020deb0bf7SJacob Keller } 33030deb0bf7SJacob Keller 33040deb0bf7SJacob Keller error_param: 33050deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 33060deb0bf7SJacob Keller v_ret, NULL, 0); 33070deb0bf7SJacob Keller } 33080deb0bf7SJacob Keller 33090deb0bf7SJacob Keller /** 33100deb0bf7SJacob Keller * ice_vc_get_stats_msg 33110deb0bf7SJacob Keller * @vf: pointer to the VF info 33120deb0bf7SJacob Keller * @msg: pointer to the msg buffer 33130deb0bf7SJacob Keller * 33140deb0bf7SJacob Keller * called from the VF to get VSI stats 33150deb0bf7SJacob Keller */ 33160deb0bf7SJacob Keller static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) 33170deb0bf7SJacob Keller { 33180deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 33190deb0bf7SJacob Keller struct virtchnl_queue_select *vqs = 33200deb0bf7SJacob Keller (struct virtchnl_queue_select *)msg; 33210deb0bf7SJacob Keller struct ice_eth_stats stats = { 0 }; 33220deb0bf7SJacob Keller struct ice_vsi *vsi; 33230deb0bf7SJacob Keller 33240deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 33250deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33260deb0bf7SJacob Keller goto error_param; 33270deb0bf7SJacob Keller } 33280deb0bf7SJacob Keller 33290deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 
33300deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33310deb0bf7SJacob Keller goto error_param; 33320deb0bf7SJacob Keller } 33330deb0bf7SJacob Keller 33340deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 33350deb0bf7SJacob Keller if (!vsi) { 33360deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 33370deb0bf7SJacob Keller goto error_param; 33380deb0bf7SJacob Keller } 33390deb0bf7SJacob Keller 33400deb0bf7SJacob Keller ice_update_eth_stats(vsi); 33410deb0bf7SJacob Keller 33420deb0bf7SJacob Keller stats = vsi->eth_stats; 33430deb0bf7SJacob Keller 33440deb0bf7SJacob Keller error_param: 33450deb0bf7SJacob Keller /* send the response to the VF */ 33460deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret, 33470deb0bf7SJacob Keller (u8 *)&stats, sizeof(stats)); 33480deb0bf7SJacob Keller } 33490deb0bf7SJacob Keller 33500deb0bf7SJacob Keller /** 33510deb0bf7SJacob Keller * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL 33520deb0bf7SJacob Keller * @vqs: virtchnl_queue_select structure containing bitmaps to validate 33530deb0bf7SJacob Keller * 33540deb0bf7SJacob Keller * Return true on successful validation, else false 33550deb0bf7SJacob Keller */ 33560deb0bf7SJacob Keller static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 33570deb0bf7SJacob Keller { 33580deb0bf7SJacob Keller if ((!vqs->rx_queues && !vqs->tx_queues) || 33590deb0bf7SJacob Keller vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) || 33600deb0bf7SJacob Keller vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF)) 33610deb0bf7SJacob Keller return false; 33620deb0bf7SJacob Keller 33630deb0bf7SJacob Keller return true; 33640deb0bf7SJacob Keller } 33650deb0bf7SJacob Keller 33660deb0bf7SJacob Keller /** 33670deb0bf7SJacob Keller * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL 33680deb0bf7SJacob Keller * @vsi: VSI of the VF to configure 33690deb0bf7SJacob Keller * @q_idx: VF queue index used to determine the queue in the 
PF's space 33700deb0bf7SJacob Keller */ 33710deb0bf7SJacob Keller static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) 33720deb0bf7SJacob Keller { 33730deb0bf7SJacob Keller struct ice_hw *hw = &vsi->back->hw; 33740deb0bf7SJacob Keller u32 pfq = vsi->txq_map[q_idx]; 33750deb0bf7SJacob Keller u32 reg; 33760deb0bf7SJacob Keller 33770deb0bf7SJacob Keller reg = rd32(hw, QINT_TQCTL(pfq)); 33780deb0bf7SJacob Keller 33790deb0bf7SJacob Keller /* MSI-X index 0 in the VF's space is always for the OICR, which means 33800deb0bf7SJacob Keller * this is most likely a poll mode VF driver, so don't enable an 33810deb0bf7SJacob Keller * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP 33820deb0bf7SJacob Keller */ 33830deb0bf7SJacob Keller if (!(reg & QINT_TQCTL_MSIX_INDX_M)) 33840deb0bf7SJacob Keller return; 33850deb0bf7SJacob Keller 33860deb0bf7SJacob Keller wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M); 33870deb0bf7SJacob Keller } 33880deb0bf7SJacob Keller 33890deb0bf7SJacob Keller /** 33900deb0bf7SJacob Keller * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL 33910deb0bf7SJacob Keller * @vsi: VSI of the VF to configure 33920deb0bf7SJacob Keller * @q_idx: VF queue index used to determine the queue in the PF's space 33930deb0bf7SJacob Keller */ 33940deb0bf7SJacob Keller static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) 33950deb0bf7SJacob Keller { 33960deb0bf7SJacob Keller struct ice_hw *hw = &vsi->back->hw; 33970deb0bf7SJacob Keller u32 pfq = vsi->rxq_map[q_idx]; 33980deb0bf7SJacob Keller u32 reg; 33990deb0bf7SJacob Keller 34000deb0bf7SJacob Keller reg = rd32(hw, QINT_RQCTL(pfq)); 34010deb0bf7SJacob Keller 34020deb0bf7SJacob Keller /* MSI-X index 0 in the VF's space is always for the OICR, which means 34030deb0bf7SJacob Keller * this is most likely a poll mode VF driver, so don't enable an 34040deb0bf7SJacob Keller * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP 
34050deb0bf7SJacob Keller */ 34060deb0bf7SJacob Keller if (!(reg & QINT_RQCTL_MSIX_INDX_M)) 34070deb0bf7SJacob Keller return; 34080deb0bf7SJacob Keller 34090deb0bf7SJacob Keller wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M); 34100deb0bf7SJacob Keller } 34110deb0bf7SJacob Keller 34120deb0bf7SJacob Keller /** 34130deb0bf7SJacob Keller * ice_vc_ena_qs_msg 34140deb0bf7SJacob Keller * @vf: pointer to the VF info 34150deb0bf7SJacob Keller * @msg: pointer to the msg buffer 34160deb0bf7SJacob Keller * 34170deb0bf7SJacob Keller * called from the VF to enable all or specific queue(s) 34180deb0bf7SJacob Keller */ 34190deb0bf7SJacob Keller static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) 34200deb0bf7SJacob Keller { 34210deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 34220deb0bf7SJacob Keller struct virtchnl_queue_select *vqs = 34230deb0bf7SJacob Keller (struct virtchnl_queue_select *)msg; 34240deb0bf7SJacob Keller struct ice_vsi *vsi; 34250deb0bf7SJacob Keller unsigned long q_map; 34260deb0bf7SJacob Keller u16 vf_q_id; 34270deb0bf7SJacob Keller 34280deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 34290deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34300deb0bf7SJacob Keller goto error_param; 34310deb0bf7SJacob Keller } 34320deb0bf7SJacob Keller 34330deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 34340deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34350deb0bf7SJacob Keller goto error_param; 34360deb0bf7SJacob Keller } 34370deb0bf7SJacob Keller 34380deb0bf7SJacob Keller if (!ice_vc_validate_vqs_bitmaps(vqs)) { 34390deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34400deb0bf7SJacob Keller goto error_param; 34410deb0bf7SJacob Keller } 34420deb0bf7SJacob Keller 34430deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 34440deb0bf7SJacob Keller if (!vsi) { 34450deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34460deb0bf7SJacob Keller goto error_param; 
34470deb0bf7SJacob Keller } 34480deb0bf7SJacob Keller 34490deb0bf7SJacob Keller /* Enable only Rx rings, Tx rings were enabled by the FW when the 34500deb0bf7SJacob Keller * Tx queue group list was configured and the context bits were 34510deb0bf7SJacob Keller * programmed using ice_vsi_cfg_txqs 34520deb0bf7SJacob Keller */ 34530deb0bf7SJacob Keller q_map = vqs->rx_queues; 34540deb0bf7SJacob Keller for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 34550deb0bf7SJacob Keller if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 34560deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34570deb0bf7SJacob Keller goto error_param; 34580deb0bf7SJacob Keller } 34590deb0bf7SJacob Keller 34600deb0bf7SJacob Keller /* Skip queue if enabled */ 34610deb0bf7SJacob Keller if (test_bit(vf_q_id, vf->rxq_ena)) 34620deb0bf7SJacob Keller continue; 34630deb0bf7SJacob Keller 34640deb0bf7SJacob Keller if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) { 34650deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n", 34660deb0bf7SJacob Keller vf_q_id, vsi->vsi_num); 34670deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34680deb0bf7SJacob Keller goto error_param; 34690deb0bf7SJacob Keller } 34700deb0bf7SJacob Keller 34710deb0bf7SJacob Keller ice_vf_ena_rxq_interrupt(vsi, vf_q_id); 34720deb0bf7SJacob Keller set_bit(vf_q_id, vf->rxq_ena); 34730deb0bf7SJacob Keller } 34740deb0bf7SJacob Keller 34750deb0bf7SJacob Keller q_map = vqs->tx_queues; 34760deb0bf7SJacob Keller for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 34770deb0bf7SJacob Keller if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 34780deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 34790deb0bf7SJacob Keller goto error_param; 34800deb0bf7SJacob Keller } 34810deb0bf7SJacob Keller 34820deb0bf7SJacob Keller /* Skip queue if enabled */ 34830deb0bf7SJacob Keller if (test_bit(vf_q_id, vf->txq_ena)) 34840deb0bf7SJacob Keller continue; 34850deb0bf7SJacob 
Keller 34860deb0bf7SJacob Keller ice_vf_ena_txq_interrupt(vsi, vf_q_id); 34870deb0bf7SJacob Keller set_bit(vf_q_id, vf->txq_ena); 34880deb0bf7SJacob Keller } 34890deb0bf7SJacob Keller 34900deb0bf7SJacob Keller /* Set flag to indicate that queues are enabled */ 34910deb0bf7SJacob Keller if (v_ret == VIRTCHNL_STATUS_SUCCESS) 34920deb0bf7SJacob Keller set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 34930deb0bf7SJacob Keller 34940deb0bf7SJacob Keller error_param: 34950deb0bf7SJacob Keller /* send the response to the VF */ 34960deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret, 34970deb0bf7SJacob Keller NULL, 0); 34980deb0bf7SJacob Keller } 34990deb0bf7SJacob Keller 35000deb0bf7SJacob Keller /** 35010deb0bf7SJacob Keller * ice_vc_dis_qs_msg 35020deb0bf7SJacob Keller * @vf: pointer to the VF info 35030deb0bf7SJacob Keller * @msg: pointer to the msg buffer 35040deb0bf7SJacob Keller * 35050deb0bf7SJacob Keller * called from the VF to disable all or specific 35060deb0bf7SJacob Keller * queue(s) 35070deb0bf7SJacob Keller */ 35080deb0bf7SJacob Keller static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) 35090deb0bf7SJacob Keller { 35100deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 35110deb0bf7SJacob Keller struct virtchnl_queue_select *vqs = 35120deb0bf7SJacob Keller (struct virtchnl_queue_select *)msg; 35130deb0bf7SJacob Keller struct ice_vsi *vsi; 35140deb0bf7SJacob Keller unsigned long q_map; 35150deb0bf7SJacob Keller u16 vf_q_id; 35160deb0bf7SJacob Keller 35170deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && 35180deb0bf7SJacob Keller !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) { 35190deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35200deb0bf7SJacob Keller goto error_param; 35210deb0bf7SJacob Keller } 35220deb0bf7SJacob Keller 35230deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 35240deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 
35250deb0bf7SJacob Keller goto error_param; 35260deb0bf7SJacob Keller } 35270deb0bf7SJacob Keller 35280deb0bf7SJacob Keller if (!ice_vc_validate_vqs_bitmaps(vqs)) { 35290deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35300deb0bf7SJacob Keller goto error_param; 35310deb0bf7SJacob Keller } 35320deb0bf7SJacob Keller 35330deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 35340deb0bf7SJacob Keller if (!vsi) { 35350deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35360deb0bf7SJacob Keller goto error_param; 35370deb0bf7SJacob Keller } 35380deb0bf7SJacob Keller 35390deb0bf7SJacob Keller if (vqs->tx_queues) { 35400deb0bf7SJacob Keller q_map = vqs->tx_queues; 35410deb0bf7SJacob Keller 35420deb0bf7SJacob Keller for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 35430deb0bf7SJacob Keller struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id]; 35440deb0bf7SJacob Keller struct ice_txq_meta txq_meta = { 0 }; 35450deb0bf7SJacob Keller 35460deb0bf7SJacob Keller if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 35470deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35480deb0bf7SJacob Keller goto error_param; 35490deb0bf7SJacob Keller } 35500deb0bf7SJacob Keller 35510deb0bf7SJacob Keller /* Skip queue if not enabled */ 35520deb0bf7SJacob Keller if (!test_bit(vf_q_id, vf->txq_ena)) 35530deb0bf7SJacob Keller continue; 35540deb0bf7SJacob Keller 35550deb0bf7SJacob Keller ice_fill_txq_meta(vsi, ring, &txq_meta); 35560deb0bf7SJacob Keller 35570deb0bf7SJacob Keller if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, 35580deb0bf7SJacob Keller ring, &txq_meta)) { 35590deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", 35600deb0bf7SJacob Keller vf_q_id, vsi->vsi_num); 35610deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35620deb0bf7SJacob Keller goto error_param; 35630deb0bf7SJacob Keller } 35640deb0bf7SJacob Keller 35650deb0bf7SJacob Keller /* Clear enabled queues flag */ 35660deb0bf7SJacob Keller 
clear_bit(vf_q_id, vf->txq_ena); 35670deb0bf7SJacob Keller } 35680deb0bf7SJacob Keller } 35690deb0bf7SJacob Keller 35700deb0bf7SJacob Keller q_map = vqs->rx_queues; 35710deb0bf7SJacob Keller /* speed up Rx queue disable by batching them if possible */ 35720deb0bf7SJacob Keller if (q_map && 35730deb0bf7SJacob Keller bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) { 35740deb0bf7SJacob Keller if (ice_vsi_stop_all_rx_rings(vsi)) { 35750deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n", 35760deb0bf7SJacob Keller vsi->vsi_num); 35770deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35780deb0bf7SJacob Keller goto error_param; 35790deb0bf7SJacob Keller } 35800deb0bf7SJacob Keller 35810deb0bf7SJacob Keller bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 35820deb0bf7SJacob Keller } else if (q_map) { 35830deb0bf7SJacob Keller for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 35840deb0bf7SJacob Keller if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 35850deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35860deb0bf7SJacob Keller goto error_param; 35870deb0bf7SJacob Keller } 35880deb0bf7SJacob Keller 35890deb0bf7SJacob Keller /* Skip queue if not enabled */ 35900deb0bf7SJacob Keller if (!test_bit(vf_q_id, vf->rxq_ena)) 35910deb0bf7SJacob Keller continue; 35920deb0bf7SJacob Keller 35930deb0bf7SJacob Keller if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id, 35940deb0bf7SJacob Keller true)) { 35950deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n", 35960deb0bf7SJacob Keller vf_q_id, vsi->vsi_num); 35970deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 35980deb0bf7SJacob Keller goto error_param; 35990deb0bf7SJacob Keller } 36000deb0bf7SJacob Keller 36010deb0bf7SJacob Keller /* Clear enabled queues flag */ 36020deb0bf7SJacob Keller clear_bit(vf_q_id, vf->rxq_ena); 36030deb0bf7SJacob Keller } 36040deb0bf7SJacob Keller } 36050deb0bf7SJacob Keller 
36060deb0bf7SJacob Keller /* Clear enabled queues flag */ 36070deb0bf7SJacob Keller if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf)) 36080deb0bf7SJacob Keller clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 36090deb0bf7SJacob Keller 36100deb0bf7SJacob Keller error_param: 36110deb0bf7SJacob Keller /* send the response to the VF */ 36120deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret, 36130deb0bf7SJacob Keller NULL, 0); 36140deb0bf7SJacob Keller } 36150deb0bf7SJacob Keller 36160deb0bf7SJacob Keller /** 36170deb0bf7SJacob Keller * ice_cfg_interrupt 36180deb0bf7SJacob Keller * @vf: pointer to the VF info 36190deb0bf7SJacob Keller * @vsi: the VSI being configured 36200deb0bf7SJacob Keller * @vector_id: vector ID 36210deb0bf7SJacob Keller * @map: vector map for mapping vectors to queues 36220deb0bf7SJacob Keller * @q_vector: structure for interrupt vector 36230deb0bf7SJacob Keller * configure the IRQ to queue map 36240deb0bf7SJacob Keller */ 36250deb0bf7SJacob Keller static int 36260deb0bf7SJacob Keller ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, 36270deb0bf7SJacob Keller struct virtchnl_vector_map *map, 36280deb0bf7SJacob Keller struct ice_q_vector *q_vector) 36290deb0bf7SJacob Keller { 36300deb0bf7SJacob Keller u16 vsi_q_id, vsi_q_id_idx; 36310deb0bf7SJacob Keller unsigned long qmap; 36320deb0bf7SJacob Keller 36330deb0bf7SJacob Keller q_vector->num_ring_rx = 0; 36340deb0bf7SJacob Keller q_vector->num_ring_tx = 0; 36350deb0bf7SJacob Keller 36360deb0bf7SJacob Keller qmap = map->rxq_map; 36370deb0bf7SJacob Keller for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { 36380deb0bf7SJacob Keller vsi_q_id = vsi_q_id_idx; 36390deb0bf7SJacob Keller 36400deb0bf7SJacob Keller if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) 36410deb0bf7SJacob Keller return VIRTCHNL_STATUS_ERR_PARAM; 36420deb0bf7SJacob Keller 36430deb0bf7SJacob Keller q_vector->num_ring_rx++; 36440deb0bf7SJacob Keller 
q_vector->rx.itr_idx = map->rxitr_idx; 36450deb0bf7SJacob Keller vsi->rx_rings[vsi_q_id]->q_vector = q_vector; 36460deb0bf7SJacob Keller ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, 36470deb0bf7SJacob Keller q_vector->rx.itr_idx); 36480deb0bf7SJacob Keller } 36490deb0bf7SJacob Keller 36500deb0bf7SJacob Keller qmap = map->txq_map; 36510deb0bf7SJacob Keller for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { 36520deb0bf7SJacob Keller vsi_q_id = vsi_q_id_idx; 36530deb0bf7SJacob Keller 36540deb0bf7SJacob Keller if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) 36550deb0bf7SJacob Keller return VIRTCHNL_STATUS_ERR_PARAM; 36560deb0bf7SJacob Keller 36570deb0bf7SJacob Keller q_vector->num_ring_tx++; 36580deb0bf7SJacob Keller q_vector->tx.itr_idx = map->txitr_idx; 36590deb0bf7SJacob Keller vsi->tx_rings[vsi_q_id]->q_vector = q_vector; 36600deb0bf7SJacob Keller ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, 36610deb0bf7SJacob Keller q_vector->tx.itr_idx); 36620deb0bf7SJacob Keller } 36630deb0bf7SJacob Keller 36640deb0bf7SJacob Keller return VIRTCHNL_STATUS_SUCCESS; 36650deb0bf7SJacob Keller } 36660deb0bf7SJacob Keller 36670deb0bf7SJacob Keller /** 36680deb0bf7SJacob Keller * ice_vc_cfg_irq_map_msg 36690deb0bf7SJacob Keller * @vf: pointer to the VF info 36700deb0bf7SJacob Keller * @msg: pointer to the msg buffer 36710deb0bf7SJacob Keller * 36720deb0bf7SJacob Keller * called from the VF to configure the IRQ to queue map 36730deb0bf7SJacob Keller */ 36740deb0bf7SJacob Keller static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) 36750deb0bf7SJacob Keller { 36760deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 36770deb0bf7SJacob Keller u16 num_q_vectors_mapped, vsi_id, vector_id; 36780deb0bf7SJacob Keller struct virtchnl_irq_map_info *irqmap_info; 36790deb0bf7SJacob Keller struct virtchnl_vector_map *map; 36800deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 36810deb0bf7SJacob Keller struct ice_vsi *vsi; 36820deb0bf7SJacob 
Keller int i; 36830deb0bf7SJacob Keller 36840deb0bf7SJacob Keller irqmap_info = (struct virtchnl_irq_map_info *)msg; 36850deb0bf7SJacob Keller num_q_vectors_mapped = irqmap_info->num_vectors; 36860deb0bf7SJacob Keller 36870deb0bf7SJacob Keller /* Check to make sure number of VF vectors mapped is not greater than 36880deb0bf7SJacob Keller * number of VF vectors originally allocated, and check that 36890deb0bf7SJacob Keller * there is actually at least a single VF queue vector mapped 36900deb0bf7SJacob Keller */ 36910deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 36920deb0bf7SJacob Keller pf->vfs.num_msix_per < num_q_vectors_mapped || 36930deb0bf7SJacob Keller !num_q_vectors_mapped) { 36940deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 36950deb0bf7SJacob Keller goto error_param; 36960deb0bf7SJacob Keller } 36970deb0bf7SJacob Keller 36980deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 36990deb0bf7SJacob Keller if (!vsi) { 37000deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37010deb0bf7SJacob Keller goto error_param; 37020deb0bf7SJacob Keller } 37030deb0bf7SJacob Keller 37040deb0bf7SJacob Keller for (i = 0; i < num_q_vectors_mapped; i++) { 37050deb0bf7SJacob Keller struct ice_q_vector *q_vector; 37060deb0bf7SJacob Keller 37070deb0bf7SJacob Keller map = &irqmap_info->vecmap[i]; 37080deb0bf7SJacob Keller 37090deb0bf7SJacob Keller vector_id = map->vector_id; 37100deb0bf7SJacob Keller vsi_id = map->vsi_id; 37110deb0bf7SJacob Keller /* vector_id is always 0-based for each VF, and can never be 37120deb0bf7SJacob Keller * larger than or equal to the max allowed interrupts per VF 37130deb0bf7SJacob Keller */ 37140deb0bf7SJacob Keller if (!(vector_id < pf->vfs.num_msix_per) || 37150deb0bf7SJacob Keller !ice_vc_isvalid_vsi_id(vf, vsi_id) || 37160deb0bf7SJacob Keller (!vector_id && (map->rxq_map || map->txq_map))) { 37170deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37180deb0bf7SJacob Keller goto error_param; 
37190deb0bf7SJacob Keller } 37200deb0bf7SJacob Keller 37210deb0bf7SJacob Keller /* No need to map VF miscellaneous or rogue vector */ 37220deb0bf7SJacob Keller if (!vector_id) 37230deb0bf7SJacob Keller continue; 37240deb0bf7SJacob Keller 37250deb0bf7SJacob Keller /* Subtract non queue vector from vector_id passed by VF 37260deb0bf7SJacob Keller * to get actual number of VSI queue vector array index 37270deb0bf7SJacob Keller */ 37280deb0bf7SJacob Keller q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF]; 37290deb0bf7SJacob Keller if (!q_vector) { 37300deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37310deb0bf7SJacob Keller goto error_param; 37320deb0bf7SJacob Keller } 37330deb0bf7SJacob Keller 37340deb0bf7SJacob Keller /* lookout for the invalid queue index */ 37350deb0bf7SJacob Keller v_ret = (enum virtchnl_status_code) 37360deb0bf7SJacob Keller ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector); 37370deb0bf7SJacob Keller if (v_ret) 37380deb0bf7SJacob Keller goto error_param; 37390deb0bf7SJacob Keller } 37400deb0bf7SJacob Keller 37410deb0bf7SJacob Keller error_param: 37420deb0bf7SJacob Keller /* send the response to the VF */ 37430deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret, 37440deb0bf7SJacob Keller NULL, 0); 37450deb0bf7SJacob Keller } 37460deb0bf7SJacob Keller 37470deb0bf7SJacob Keller /** 37480deb0bf7SJacob Keller * ice_vc_cfg_qs_msg 37490deb0bf7SJacob Keller * @vf: pointer to the VF info 37500deb0bf7SJacob Keller * @msg: pointer to the msg buffer 37510deb0bf7SJacob Keller * 37520deb0bf7SJacob Keller * called from the VF to configure the Rx/Tx queues 37530deb0bf7SJacob Keller */ 37540deb0bf7SJacob Keller static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) 37550deb0bf7SJacob Keller { 37560deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 37570deb0bf7SJacob Keller struct virtchnl_vsi_queue_config_info *qci = 37580deb0bf7SJacob Keller (struct 
virtchnl_vsi_queue_config_info *)msg; 37590deb0bf7SJacob Keller struct virtchnl_queue_pair_info *qpi; 37600deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 37610deb0bf7SJacob Keller struct ice_vsi *vsi; 37620deb0bf7SJacob Keller int i, q_idx; 37630deb0bf7SJacob Keller 37640deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 37650deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37660deb0bf7SJacob Keller goto error_param; 37670deb0bf7SJacob Keller } 37680deb0bf7SJacob Keller 37690deb0bf7SJacob Keller if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { 37700deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37710deb0bf7SJacob Keller goto error_param; 37720deb0bf7SJacob Keller } 37730deb0bf7SJacob Keller 37740deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 37750deb0bf7SJacob Keller if (!vsi) { 37760deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37770deb0bf7SJacob Keller goto error_param; 37780deb0bf7SJacob Keller } 37790deb0bf7SJacob Keller 37800deb0bf7SJacob Keller if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || 37810deb0bf7SJacob Keller qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { 37820deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", 37830deb0bf7SJacob Keller vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); 37840deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37850deb0bf7SJacob Keller goto error_param; 37860deb0bf7SJacob Keller } 37870deb0bf7SJacob Keller 37880deb0bf7SJacob Keller for (i = 0; i < qci->num_queue_pairs; i++) { 37890deb0bf7SJacob Keller qpi = &qci->qpair[i]; 37900deb0bf7SJacob Keller if (qpi->txq.vsi_id != qci->vsi_id || 37910deb0bf7SJacob Keller qpi->rxq.vsi_id != qci->vsi_id || 37920deb0bf7SJacob Keller qpi->rxq.queue_id != qpi->txq.queue_id || 37930deb0bf7SJacob Keller qpi->txq.headwb_enabled || 37940deb0bf7SJacob Keller !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || 37950deb0bf7SJacob Keller 
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || 37960deb0bf7SJacob Keller !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { 37970deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 37980deb0bf7SJacob Keller goto error_param; 37990deb0bf7SJacob Keller } 38000deb0bf7SJacob Keller 38010deb0bf7SJacob Keller q_idx = qpi->rxq.queue_id; 38020deb0bf7SJacob Keller 38030deb0bf7SJacob Keller /* make sure selected "q_idx" is in valid range of queues 38040deb0bf7SJacob Keller * for selected "vsi" 38050deb0bf7SJacob Keller */ 38060deb0bf7SJacob Keller if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { 38070deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38080deb0bf7SJacob Keller goto error_param; 38090deb0bf7SJacob Keller } 38100deb0bf7SJacob Keller 38110deb0bf7SJacob Keller /* copy Tx queue info from VF into VSI */ 38120deb0bf7SJacob Keller if (qpi->txq.ring_len > 0) { 38130deb0bf7SJacob Keller vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; 38140deb0bf7SJacob Keller vsi->tx_rings[i]->count = qpi->txq.ring_len; 38150deb0bf7SJacob Keller if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { 38160deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38170deb0bf7SJacob Keller goto error_param; 38180deb0bf7SJacob Keller } 38190deb0bf7SJacob Keller } 38200deb0bf7SJacob Keller 38210deb0bf7SJacob Keller /* copy Rx queue info from VF into VSI */ 38220deb0bf7SJacob Keller if (qpi->rxq.ring_len > 0) { 38230deb0bf7SJacob Keller u16 max_frame_size = ice_vc_get_max_frame_size(vf); 38240deb0bf7SJacob Keller 38250deb0bf7SJacob Keller vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; 38260deb0bf7SJacob Keller vsi->rx_rings[i]->count = qpi->rxq.ring_len; 38270deb0bf7SJacob Keller 38280deb0bf7SJacob Keller if (qpi->rxq.databuffer_size != 0 && 38290deb0bf7SJacob Keller (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || 38300deb0bf7SJacob Keller qpi->rxq.databuffer_size < 1024)) { 38310deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38320deb0bf7SJacob Keller 
goto error_param; 38330deb0bf7SJacob Keller } 38340deb0bf7SJacob Keller vsi->rx_buf_len = qpi->rxq.databuffer_size; 38350deb0bf7SJacob Keller vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; 38360deb0bf7SJacob Keller if (qpi->rxq.max_pkt_size > max_frame_size || 38370deb0bf7SJacob Keller qpi->rxq.max_pkt_size < 64) { 38380deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38390deb0bf7SJacob Keller goto error_param; 38400deb0bf7SJacob Keller } 38410deb0bf7SJacob Keller 38420deb0bf7SJacob Keller vsi->max_frame = qpi->rxq.max_pkt_size; 38430deb0bf7SJacob Keller /* add space for the port VLAN since the VF driver is not 38440deb0bf7SJacob Keller * expected to account for it in the MTU calculation 38450deb0bf7SJacob Keller */ 38460deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 38470deb0bf7SJacob Keller vsi->max_frame += VLAN_HLEN; 38480deb0bf7SJacob Keller 38490deb0bf7SJacob Keller if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { 38500deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 38510deb0bf7SJacob Keller goto error_param; 38520deb0bf7SJacob Keller } 38530deb0bf7SJacob Keller } 38540deb0bf7SJacob Keller } 38550deb0bf7SJacob Keller 38560deb0bf7SJacob Keller error_param: 38570deb0bf7SJacob Keller /* send the response to the VF */ 38580deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, 38590deb0bf7SJacob Keller NULL, 0); 38600deb0bf7SJacob Keller } 38610deb0bf7SJacob Keller 38620deb0bf7SJacob Keller /** 38630deb0bf7SJacob Keller * ice_is_vf_trusted 38640deb0bf7SJacob Keller * @vf: pointer to the VF info 38650deb0bf7SJacob Keller */ 38660deb0bf7SJacob Keller static bool ice_is_vf_trusted(struct ice_vf *vf) 38670deb0bf7SJacob Keller { 38680deb0bf7SJacob Keller return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 38690deb0bf7SJacob Keller } 38700deb0bf7SJacob Keller 38710deb0bf7SJacob Keller /** 38720deb0bf7SJacob Keller * ice_can_vf_change_mac 38730deb0bf7SJacob Keller * @vf: pointer to the VF info 
38740deb0bf7SJacob Keller * 38750deb0bf7SJacob Keller * Return true if the VF is allowed to change its MAC filters, false otherwise 38760deb0bf7SJacob Keller */ 38770deb0bf7SJacob Keller static bool ice_can_vf_change_mac(struct ice_vf *vf) 38780deb0bf7SJacob Keller { 38790deb0bf7SJacob Keller /* If the VF MAC address has been set administratively (via the 38800deb0bf7SJacob Keller * ndo_set_vf_mac command), then deny permission to the VF to 38810deb0bf7SJacob Keller * add/delete unicast MAC addresses, unless the VF is trusted 38820deb0bf7SJacob Keller */ 38830deb0bf7SJacob Keller if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) 38840deb0bf7SJacob Keller return false; 38850deb0bf7SJacob Keller 38860deb0bf7SJacob Keller return true; 38870deb0bf7SJacob Keller } 38880deb0bf7SJacob Keller 38890deb0bf7SJacob Keller /** 38900deb0bf7SJacob Keller * ice_vc_ether_addr_type - get type of virtchnl_ether_addr 38910deb0bf7SJacob Keller * @vc_ether_addr: used to extract the type 38920deb0bf7SJacob Keller */ 38930deb0bf7SJacob Keller static u8 38940deb0bf7SJacob Keller ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) 38950deb0bf7SJacob Keller { 38960deb0bf7SJacob Keller return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK); 38970deb0bf7SJacob Keller } 38980deb0bf7SJacob Keller 38990deb0bf7SJacob Keller /** 39000deb0bf7SJacob Keller * ice_is_vc_addr_legacy - check if the MAC address is from an older VF 39010deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 39020deb0bf7SJacob Keller */ 39030deb0bf7SJacob Keller static bool 39040deb0bf7SJacob Keller ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) 39050deb0bf7SJacob Keller { 39060deb0bf7SJacob Keller u8 type = ice_vc_ether_addr_type(vc_ether_addr); 39070deb0bf7SJacob Keller 39080deb0bf7SJacob Keller return (type == VIRTCHNL_ETHER_ADDR_LEGACY); 39090deb0bf7SJacob Keller } 39100deb0bf7SJacob Keller 39110deb0bf7SJacob Keller /** 39120deb0bf7SJacob Keller * 
ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC 39130deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 39140deb0bf7SJacob Keller * 39150deb0bf7SJacob Keller * This function should only be called when the MAC address in 39160deb0bf7SJacob Keller * virtchnl_ether_addr is a valid unicast MAC 39170deb0bf7SJacob Keller */ 39180deb0bf7SJacob Keller static bool 39190deb0bf7SJacob Keller ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr) 39200deb0bf7SJacob Keller { 39210deb0bf7SJacob Keller u8 type = ice_vc_ether_addr_type(vc_ether_addr); 39220deb0bf7SJacob Keller 39230deb0bf7SJacob Keller return (type == VIRTCHNL_ETHER_ADDR_PRIMARY); 39240deb0bf7SJacob Keller } 39250deb0bf7SJacob Keller 39260deb0bf7SJacob Keller /** 39270deb0bf7SJacob Keller * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed 39280deb0bf7SJacob Keller * @vf: VF to update 39290deb0bf7SJacob Keller * @vc_ether_addr: structure from VIRTCHNL with MAC to add 39300deb0bf7SJacob Keller */ 39310deb0bf7SJacob Keller static void 39320deb0bf7SJacob Keller ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) 39330deb0bf7SJacob Keller { 39340deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 39350deb0bf7SJacob Keller 39360deb0bf7SJacob Keller if (!is_valid_ether_addr(mac_addr)) 39370deb0bf7SJacob Keller return; 39380deb0bf7SJacob Keller 39390deb0bf7SJacob Keller /* only allow legacy VF drivers to set the device and hardware MAC if it 39400deb0bf7SJacob Keller * is zero and allow new VF drivers to set the hardware MAC if the type 39410deb0bf7SJacob Keller * was correctly specified over VIRTCHNL 39420deb0bf7SJacob Keller */ 39430deb0bf7SJacob Keller if ((ice_is_vc_addr_legacy(vc_ether_addr) && 39440deb0bf7SJacob Keller is_zero_ether_addr(vf->hw_lan_addr.addr)) || 39450deb0bf7SJacob Keller ice_is_vc_addr_primary(vc_ether_addr)) { 39460deb0bf7SJacob Keller 
ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); 39470deb0bf7SJacob Keller ether_addr_copy(vf->hw_lan_addr.addr, mac_addr); 39480deb0bf7SJacob Keller } 39490deb0bf7SJacob Keller 39500deb0bf7SJacob Keller /* hardware and device MACs are already set, but its possible that the 39510deb0bf7SJacob Keller * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the 39520deb0bf7SJacob Keller * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it 39530deb0bf7SJacob Keller * away for the legacy VF driver case as it will be updated in the 39540deb0bf7SJacob Keller * delete flow for this case 39550deb0bf7SJacob Keller */ 39560deb0bf7SJacob Keller if (ice_is_vc_addr_legacy(vc_ether_addr)) { 39570deb0bf7SJacob Keller ether_addr_copy(vf->legacy_last_added_umac.addr, 39580deb0bf7SJacob Keller mac_addr); 39590deb0bf7SJacob Keller vf->legacy_last_added_umac.time_modified = jiffies; 39600deb0bf7SJacob Keller } 39610deb0bf7SJacob Keller } 39620deb0bf7SJacob Keller 39630deb0bf7SJacob Keller /** 39640deb0bf7SJacob Keller * ice_vc_add_mac_addr - attempt to add the MAC address passed in 39650deb0bf7SJacob Keller * @vf: pointer to the VF info 39660deb0bf7SJacob Keller * @vsi: pointer to the VF's VSI 39670deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC 39680deb0bf7SJacob Keller */ 39690deb0bf7SJacob Keller static int 39700deb0bf7SJacob Keller ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, 39710deb0bf7SJacob Keller struct virtchnl_ether_addr *vc_ether_addr) 39720deb0bf7SJacob Keller { 39730deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 39740deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 39750deb0bf7SJacob Keller int ret; 39760deb0bf7SJacob Keller 39770deb0bf7SJacob Keller /* device MAC already added */ 39780deb0bf7SJacob Keller if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) 39790deb0bf7SJacob Keller return 0; 39800deb0bf7SJacob Keller 39810deb0bf7SJacob Keller if 
(is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { 39820deb0bf7SJacob Keller dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 39830deb0bf7SJacob Keller return -EPERM; 39840deb0bf7SJacob Keller } 39850deb0bf7SJacob Keller 39860deb0bf7SJacob Keller ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 39870deb0bf7SJacob Keller if (ret == -EEXIST) { 39880deb0bf7SJacob Keller dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, 39890deb0bf7SJacob Keller vf->vf_id); 39900deb0bf7SJacob Keller /* don't return since we might need to update 39910deb0bf7SJacob Keller * the primary MAC in ice_vfhw_mac_add() below 39920deb0bf7SJacob Keller */ 39930deb0bf7SJacob Keller } else if (ret) { 39940deb0bf7SJacob Keller dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", 39950deb0bf7SJacob Keller mac_addr, vf->vf_id, ret); 39960deb0bf7SJacob Keller return ret; 39970deb0bf7SJacob Keller } else { 39980deb0bf7SJacob Keller vf->num_mac++; 39990deb0bf7SJacob Keller } 40000deb0bf7SJacob Keller 40010deb0bf7SJacob Keller ice_vfhw_mac_add(vf, vc_ether_addr); 40020deb0bf7SJacob Keller 40030deb0bf7SJacob Keller return ret; 40040deb0bf7SJacob Keller } 40050deb0bf7SJacob Keller 40060deb0bf7SJacob Keller /** 40070deb0bf7SJacob Keller * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired 40080deb0bf7SJacob Keller * @last_added_umac: structure used to check expiration 40090deb0bf7SJacob Keller */ 40100deb0bf7SJacob Keller static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) 40110deb0bf7SJacob Keller { 40120deb0bf7SJacob Keller #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000) 40130deb0bf7SJacob Keller return time_is_before_jiffies(last_added_umac->time_modified + 40140deb0bf7SJacob Keller ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME); 40150deb0bf7SJacob Keller } 40160deb0bf7SJacob Keller 40170deb0bf7SJacob Keller /** 
40180deb0bf7SJacob Keller * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF 40190deb0bf7SJacob Keller * @vf: VF to update 40200deb0bf7SJacob Keller * @vc_ether_addr: structure from VIRTCHNL with MAC to check 40210deb0bf7SJacob Keller * 40220deb0bf7SJacob Keller * only update cached hardware MAC for legacy VF drivers on delete 40230deb0bf7SJacob Keller * because we cannot guarantee order/type of MAC from the VF driver 40240deb0bf7SJacob Keller */ 40250deb0bf7SJacob Keller static void 40260deb0bf7SJacob Keller ice_update_legacy_cached_mac(struct ice_vf *vf, 40270deb0bf7SJacob Keller struct virtchnl_ether_addr *vc_ether_addr) 40280deb0bf7SJacob Keller { 40290deb0bf7SJacob Keller if (!ice_is_vc_addr_legacy(vc_ether_addr) || 40300deb0bf7SJacob Keller ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) 40310deb0bf7SJacob Keller return; 40320deb0bf7SJacob Keller 40330deb0bf7SJacob Keller ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); 40340deb0bf7SJacob Keller ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); 40350deb0bf7SJacob Keller } 40360deb0bf7SJacob Keller 40370deb0bf7SJacob Keller /** 40380deb0bf7SJacob Keller * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed 40390deb0bf7SJacob Keller * @vf: VF to update 40400deb0bf7SJacob Keller * @vc_ether_addr: structure from VIRTCHNL with MAC to delete 40410deb0bf7SJacob Keller */ 40420deb0bf7SJacob Keller static void 40430deb0bf7SJacob Keller ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) 40440deb0bf7SJacob Keller { 40450deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 40460deb0bf7SJacob Keller 40470deb0bf7SJacob Keller if (!is_valid_ether_addr(mac_addr) || 40480deb0bf7SJacob Keller !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) 40490deb0bf7SJacob Keller return; 40500deb0bf7SJacob Keller 40510deb0bf7SJacob Keller /* allow the device MAC to be repopulated in the add flow and don't 
40520deb0bf7SJacob Keller * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant 40530deb0bf7SJacob Keller * to be persistent on VM reboot and across driver unload/load, which 40540deb0bf7SJacob Keller * won't work if we clear the hardware MAC here 40550deb0bf7SJacob Keller */ 40560deb0bf7SJacob Keller eth_zero_addr(vf->dev_lan_addr.addr); 40570deb0bf7SJacob Keller 40580deb0bf7SJacob Keller ice_update_legacy_cached_mac(vf, vc_ether_addr); 40590deb0bf7SJacob Keller } 40600deb0bf7SJacob Keller 40610deb0bf7SJacob Keller /** 40620deb0bf7SJacob Keller * ice_vc_del_mac_addr - attempt to delete the MAC address passed in 40630deb0bf7SJacob Keller * @vf: pointer to the VF info 40640deb0bf7SJacob Keller * @vsi: pointer to the VF's VSI 40650deb0bf7SJacob Keller * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC 40660deb0bf7SJacob Keller */ 40670deb0bf7SJacob Keller static int 40680deb0bf7SJacob Keller ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, 40690deb0bf7SJacob Keller struct virtchnl_ether_addr *vc_ether_addr) 40700deb0bf7SJacob Keller { 40710deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(vf->pf); 40720deb0bf7SJacob Keller u8 *mac_addr = vc_ether_addr->addr; 40730deb0bf7SJacob Keller int status; 40740deb0bf7SJacob Keller 40750deb0bf7SJacob Keller if (!ice_can_vf_change_mac(vf) && 40760deb0bf7SJacob Keller ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) 40770deb0bf7SJacob Keller return 0; 40780deb0bf7SJacob Keller 40790deb0bf7SJacob Keller status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 40800deb0bf7SJacob Keller if (status == -ENOENT) { 40810deb0bf7SJacob Keller dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, 40820deb0bf7SJacob Keller vf->vf_id); 40830deb0bf7SJacob Keller return -ENOENT; 40840deb0bf7SJacob Keller } else if (status) { 40850deb0bf7SJacob Keller dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", 40860deb0bf7SJacob Keller mac_addr, vf->vf_id, status); 
40870deb0bf7SJacob Keller return -EIO; 40880deb0bf7SJacob Keller } 40890deb0bf7SJacob Keller 40900deb0bf7SJacob Keller ice_vfhw_mac_del(vf, vc_ether_addr); 40910deb0bf7SJacob Keller 40920deb0bf7SJacob Keller vf->num_mac--; 40930deb0bf7SJacob Keller 40940deb0bf7SJacob Keller return 0; 40950deb0bf7SJacob Keller } 40960deb0bf7SJacob Keller 40970deb0bf7SJacob Keller /** 40980deb0bf7SJacob Keller * ice_vc_handle_mac_addr_msg 40990deb0bf7SJacob Keller * @vf: pointer to the VF info 41000deb0bf7SJacob Keller * @msg: pointer to the msg buffer 41010deb0bf7SJacob Keller * @set: true if MAC filters are being set, false otherwise 41020deb0bf7SJacob Keller * 41030deb0bf7SJacob Keller * add guest MAC address filter 41040deb0bf7SJacob Keller */ 41050deb0bf7SJacob Keller static int 41060deb0bf7SJacob Keller ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) 41070deb0bf7SJacob Keller { 41080deb0bf7SJacob Keller int (*ice_vc_cfg_mac) 41090deb0bf7SJacob Keller (struct ice_vf *vf, struct ice_vsi *vsi, 41100deb0bf7SJacob Keller struct virtchnl_ether_addr *virtchnl_ether_addr); 41110deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 41120deb0bf7SJacob Keller struct virtchnl_ether_addr_list *al = 41130deb0bf7SJacob Keller (struct virtchnl_ether_addr_list *)msg; 41140deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 41150deb0bf7SJacob Keller enum virtchnl_ops vc_op; 41160deb0bf7SJacob Keller struct ice_vsi *vsi; 41170deb0bf7SJacob Keller int i; 41180deb0bf7SJacob Keller 41190deb0bf7SJacob Keller if (set) { 41200deb0bf7SJacob Keller vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; 41210deb0bf7SJacob Keller ice_vc_cfg_mac = ice_vc_add_mac_addr; 41220deb0bf7SJacob Keller } else { 41230deb0bf7SJacob Keller vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; 41240deb0bf7SJacob Keller ice_vc_cfg_mac = ice_vc_del_mac_addr; 41250deb0bf7SJacob Keller } 41260deb0bf7SJacob Keller 41270deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 41280deb0bf7SJacob Keller 
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { 41290deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 41300deb0bf7SJacob Keller goto handle_mac_exit; 41310deb0bf7SJacob Keller } 41320deb0bf7SJacob Keller 41330deb0bf7SJacob Keller /* If this VF is not privileged, then we can't add more than a 41340deb0bf7SJacob Keller * limited number of addresses. Check to make sure that the 41350deb0bf7SJacob Keller * additions do not push us over the limit. 41360deb0bf7SJacob Keller */ 41370deb0bf7SJacob Keller if (set && !ice_is_vf_trusted(vf) && 41380deb0bf7SJacob Keller (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { 41390deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", 41400deb0bf7SJacob Keller vf->vf_id); 41410deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 41420deb0bf7SJacob Keller goto handle_mac_exit; 41430deb0bf7SJacob Keller } 41440deb0bf7SJacob Keller 41450deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 41460deb0bf7SJacob Keller if (!vsi) { 41470deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 41480deb0bf7SJacob Keller goto handle_mac_exit; 41490deb0bf7SJacob Keller } 41500deb0bf7SJacob Keller 41510deb0bf7SJacob Keller for (i = 0; i < al->num_elements; i++) { 41520deb0bf7SJacob Keller u8 *mac_addr = al->list[i].addr; 41530deb0bf7SJacob Keller int result; 41540deb0bf7SJacob Keller 41550deb0bf7SJacob Keller if (is_broadcast_ether_addr(mac_addr) || 41560deb0bf7SJacob Keller is_zero_ether_addr(mac_addr)) 41570deb0bf7SJacob Keller continue; 41580deb0bf7SJacob Keller 41590deb0bf7SJacob Keller result = ice_vc_cfg_mac(vf, vsi, &al->list[i]); 41600deb0bf7SJacob Keller if (result == -EEXIST || result == -ENOENT) { 41610deb0bf7SJacob Keller continue; 41620deb0bf7SJacob Keller } else if (result) { 41630deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 41640deb0bf7SJacob Keller goto handle_mac_exit; 
41650deb0bf7SJacob Keller } 41660deb0bf7SJacob Keller } 41670deb0bf7SJacob Keller 41680deb0bf7SJacob Keller handle_mac_exit: 41690deb0bf7SJacob Keller /* send the response to the VF */ 41700deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); 41710deb0bf7SJacob Keller } 41720deb0bf7SJacob Keller 41730deb0bf7SJacob Keller /** 41740deb0bf7SJacob Keller * ice_vc_add_mac_addr_msg 41750deb0bf7SJacob Keller * @vf: pointer to the VF info 41760deb0bf7SJacob Keller * @msg: pointer to the msg buffer 41770deb0bf7SJacob Keller * 41780deb0bf7SJacob Keller * add guest MAC address filter 41790deb0bf7SJacob Keller */ 41800deb0bf7SJacob Keller static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg) 41810deb0bf7SJacob Keller { 41820deb0bf7SJacob Keller return ice_vc_handle_mac_addr_msg(vf, msg, true); 41830deb0bf7SJacob Keller } 41840deb0bf7SJacob Keller 41850deb0bf7SJacob Keller /** 41860deb0bf7SJacob Keller * ice_vc_del_mac_addr_msg 41870deb0bf7SJacob Keller * @vf: pointer to the VF info 41880deb0bf7SJacob Keller * @msg: pointer to the msg buffer 41890deb0bf7SJacob Keller * 41900deb0bf7SJacob Keller * remove guest MAC address filter 41910deb0bf7SJacob Keller */ 41920deb0bf7SJacob Keller static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) 41930deb0bf7SJacob Keller { 41940deb0bf7SJacob Keller return ice_vc_handle_mac_addr_msg(vf, msg, false); 41950deb0bf7SJacob Keller } 41960deb0bf7SJacob Keller 41970deb0bf7SJacob Keller /** 41980deb0bf7SJacob Keller * ice_vc_request_qs_msg 41990deb0bf7SJacob Keller * @vf: pointer to the VF info 42000deb0bf7SJacob Keller * @msg: pointer to the msg buffer 42010deb0bf7SJacob Keller * 42020deb0bf7SJacob Keller * VFs get a default number of queues but can use this message to request a 42030deb0bf7SJacob Keller * different number. If the request is successful, PF will reset the VF and 42040deb0bf7SJacob Keller * return 0. 
If unsuccessful, PF will send message informing VF of number of 42050deb0bf7SJacob Keller * available queue pairs via virtchnl message response to VF. 42060deb0bf7SJacob Keller */ 42070deb0bf7SJacob Keller static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) 42080deb0bf7SJacob Keller { 42090deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 42100deb0bf7SJacob Keller struct virtchnl_vf_res_request *vfres = 42110deb0bf7SJacob Keller (struct virtchnl_vf_res_request *)msg; 42120deb0bf7SJacob Keller u16 req_queues = vfres->num_queue_pairs; 42130deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 42140deb0bf7SJacob Keller u16 max_allowed_vf_queues; 42150deb0bf7SJacob Keller u16 tx_rx_queue_left; 42160deb0bf7SJacob Keller struct device *dev; 42170deb0bf7SJacob Keller u16 cur_queues; 42180deb0bf7SJacob Keller 42190deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 42200deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 42210deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 42220deb0bf7SJacob Keller goto error_param; 42230deb0bf7SJacob Keller } 42240deb0bf7SJacob Keller 42250deb0bf7SJacob Keller cur_queues = vf->num_vf_qs; 42260deb0bf7SJacob Keller tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), 42270deb0bf7SJacob Keller ice_get_avail_rxq_count(pf)); 42280deb0bf7SJacob Keller max_allowed_vf_queues = tx_rx_queue_left + cur_queues; 42290deb0bf7SJacob Keller if (!req_queues) { 42300deb0bf7SJacob Keller dev_err(dev, "VF %d tried to request 0 queues. 
Ignoring.\n", 42310deb0bf7SJacob Keller vf->vf_id); 42320deb0bf7SJacob Keller } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) { 42330deb0bf7SJacob Keller dev_err(dev, "VF %d tried to request more than %d queues.\n", 42340deb0bf7SJacob Keller vf->vf_id, ICE_MAX_RSS_QS_PER_VF); 42350deb0bf7SJacob Keller vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF; 42360deb0bf7SJacob Keller } else if (req_queues > cur_queues && 42370deb0bf7SJacob Keller req_queues - cur_queues > tx_rx_queue_left) { 42380deb0bf7SJacob Keller dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", 42390deb0bf7SJacob Keller vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); 42400deb0bf7SJacob Keller vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, 42410deb0bf7SJacob Keller ICE_MAX_RSS_QS_PER_VF); 42420deb0bf7SJacob Keller } else { 42430deb0bf7SJacob Keller /* request is successful, then reset VF */ 42440deb0bf7SJacob Keller vf->num_req_qs = req_queues; 42450deb0bf7SJacob Keller ice_vc_reset_vf(vf); 42460deb0bf7SJacob Keller dev_info(dev, "VF %d granted request of %u queues.\n", 42470deb0bf7SJacob Keller vf->vf_id, req_queues); 42480deb0bf7SJacob Keller return 0; 42490deb0bf7SJacob Keller } 42500deb0bf7SJacob Keller 42510deb0bf7SJacob Keller error_param: 42520deb0bf7SJacob Keller /* send the response to the VF */ 42530deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 42540deb0bf7SJacob Keller v_ret, (u8 *)vfres, sizeof(*vfres)); 42550deb0bf7SJacob Keller } 42560deb0bf7SJacob Keller 42570deb0bf7SJacob Keller /** 42580deb0bf7SJacob Keller * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported 42590deb0bf7SJacob Keller * @hw: hardware structure used to check the VLAN mode 42600deb0bf7SJacob Keller * @vlan_proto: VLAN TPID being checked 42610deb0bf7SJacob Keller * 42620deb0bf7SJacob Keller * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q 42630deb0bf7SJacob Keller * and ETH_P_8021AD are supported. 
If the device is configured in Single VLAN 42640deb0bf7SJacob Keller * Mode (SVM), then only ETH_P_8021Q is supported. 42650deb0bf7SJacob Keller */ 42660deb0bf7SJacob Keller static bool 42670deb0bf7SJacob Keller ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto) 42680deb0bf7SJacob Keller { 42690deb0bf7SJacob Keller bool is_supported = false; 42700deb0bf7SJacob Keller 42710deb0bf7SJacob Keller switch (vlan_proto) { 42720deb0bf7SJacob Keller case ETH_P_8021Q: 42730deb0bf7SJacob Keller is_supported = true; 42740deb0bf7SJacob Keller break; 42750deb0bf7SJacob Keller case ETH_P_8021AD: 42760deb0bf7SJacob Keller if (ice_is_dvm_ena(hw)) 42770deb0bf7SJacob Keller is_supported = true; 42780deb0bf7SJacob Keller break; 42790deb0bf7SJacob Keller } 42800deb0bf7SJacob Keller 42810deb0bf7SJacob Keller return is_supported; 42820deb0bf7SJacob Keller } 42830deb0bf7SJacob Keller 42840deb0bf7SJacob Keller /** 42850deb0bf7SJacob Keller * ice_set_vf_port_vlan 42860deb0bf7SJacob Keller * @netdev: network interface device structure 42870deb0bf7SJacob Keller * @vf_id: VF identifier 42880deb0bf7SJacob Keller * @vlan_id: VLAN ID being set 42890deb0bf7SJacob Keller * @qos: priority setting 42900deb0bf7SJacob Keller * @vlan_proto: VLAN protocol 42910deb0bf7SJacob Keller * 42920deb0bf7SJacob Keller * program VF Port VLAN ID and/or QoS 42930deb0bf7SJacob Keller */ 42940deb0bf7SJacob Keller int 42950deb0bf7SJacob Keller ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, 42960deb0bf7SJacob Keller __be16 vlan_proto) 42970deb0bf7SJacob Keller { 42980deb0bf7SJacob Keller struct ice_pf *pf = ice_netdev_to_pf(netdev); 42990deb0bf7SJacob Keller u16 local_vlan_proto = ntohs(vlan_proto); 43000deb0bf7SJacob Keller struct device *dev; 43010deb0bf7SJacob Keller struct ice_vf *vf; 43020deb0bf7SJacob Keller int ret; 43030deb0bf7SJacob Keller 43040deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 43050deb0bf7SJacob Keller 43060deb0bf7SJacob Keller if (vlan_id >= 
VLAN_N_VID || qos > 7) { 43070deb0bf7SJacob Keller dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", 43080deb0bf7SJacob Keller vf_id, vlan_id, qos); 43090deb0bf7SJacob Keller return -EINVAL; 43100deb0bf7SJacob Keller } 43110deb0bf7SJacob Keller 43120deb0bf7SJacob Keller if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { 43130deb0bf7SJacob Keller dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", 43140deb0bf7SJacob Keller local_vlan_proto); 43150deb0bf7SJacob Keller return -EPROTONOSUPPORT; 43160deb0bf7SJacob Keller } 43170deb0bf7SJacob Keller 43180deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 43190deb0bf7SJacob Keller if (!vf) 43200deb0bf7SJacob Keller return -EINVAL; 43210deb0bf7SJacob Keller 43220deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 43230deb0bf7SJacob Keller if (ret) 43240deb0bf7SJacob Keller goto out_put_vf; 43250deb0bf7SJacob Keller 43260deb0bf7SJacob Keller if (ice_vf_get_port_vlan_prio(vf) == qos && 43270deb0bf7SJacob Keller ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && 43280deb0bf7SJacob Keller ice_vf_get_port_vlan_id(vf) == vlan_id) { 43290deb0bf7SJacob Keller /* duplicate request, so just return success */ 43300deb0bf7SJacob Keller dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", 43310deb0bf7SJacob Keller vlan_id, qos, local_vlan_proto); 43320deb0bf7SJacob Keller ret = 0; 43330deb0bf7SJacob Keller goto out_put_vf; 43340deb0bf7SJacob Keller } 43350deb0bf7SJacob Keller 43360deb0bf7SJacob Keller mutex_lock(&vf->cfg_lock); 43370deb0bf7SJacob Keller 43380deb0bf7SJacob Keller vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos); 43390deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 43400deb0bf7SJacob Keller dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", 43410deb0bf7SJacob Keller vlan_id, qos, local_vlan_proto, vf_id); 43420deb0bf7SJacob Keller else 43430deb0bf7SJacob Keller dev_info(dev, "Clearing port VLAN on VF %d\n", 
	 vf_id);

	ice_vc_reset_vf(vf);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
 */
static bool ice_vf_vlan_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
}

/**
 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
 * @vf: VF used to determine if VLAN promiscuous config is allowed
 *
 * VLAN promiscuous is only allowed when the VF is already in unicast or
 * multicast promiscuous mode and the PF has true promiscuous support enabled
 * (ICE_FLAG_VF_TRUE_PROMISC_ENA).
 */
static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
		return true;

	return false;
}

/**
 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
 * @vsi: VF's VSI used to enable VLAN promiscuous mode
 * @vlan: VLAN used to enable VLAN promiscuous
 *
 * This function should only be called if VLAN promiscuous mode is allowed,
 * which can be determined via ice_is_vlan_promisc_allowed().
 */
static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
	int status;

	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
					  vlan->vid);
	/* an already-existing promiscuous rule is treated as success */
	if (status && status != -EEXIST)
		return status;

	return 0;
}

/**
 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
 * @vsi: VF's VSI used to disable VLAN promiscuous mode for
 * @vlan: VLAN used to disable VLAN promiscuous
 *
 * This function should only be called if VLAN promiscuous mode is allowed,
 * which can be determined via ice_is_vlan_promisc_allowed().
 */
static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
	int status;

	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
					    vlan->vid);
	/* a rule that is already absent is treated as success */
	if (status && status != -ENOENT)
		return status;

	return 0;
}

/**
 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
 * @vf: VF to check against
 * @vsi: VF's VSI
 *
 * If the VF is trusted then the VF is allowed to add as many VLANs as it
 * wants to, so return false.
 *
 * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
 * allowed VLANs for an untrusted VF. Return the result of this comparison.
 */
static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
{
	if (ice_is_vf_trusted(vf))
		return false;

/* +1 accounts for the default VLAN 0 filter added for every VF */
#define ICE_VF_ADDED_VLAN_ZERO_FLTRS	1
	return ((ice_vsi_num_non_zero_vlans(vsi) +
		 ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
}

/**
 * ice_vc_process_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: Add VLAN if true, otherwise delete VLAN
 *
 * Process virtchnl op to add or remove programmed guest VLAN ID
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;
	struct device *dev;
	int status = 0;
	int i;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* validate every requested VLAN ID before touching hardware */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] >= VLAN_N_VID) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(dev, "invalid VF VLAN id %d\n",
				vfl->vlan_id[i]);
			goto error_param;
		}
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
			 vf->vf_id);
		/* There is no need to let VF know about being not trusted,
		 * so we can just return success message here
		 */
		goto error_param;
	}

	/* in DVM a VF can add/delete inner VLAN filters when
	 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
	 */
	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* in DVM VLAN promiscuous is based on the outer VLAN, which would be
	 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
	 * allow vlan_promisc = true in SVM and if no port VLAN is configured
	 */
	vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
		       !ice_is_dvm_ena(&pf->hw) &&
		       !ice_vf_is_port_vlan_ena(vf);

	if (add_v) {
		for (i = 0; i < vfl->num_elements; i++) {
			u16 vid = vfl->vlan_id[i];
			struct ice_vlan vlan;

			/* re-check the limit inside the loop since each
			 * iteration may add a filter
			 */
			if (ice_vf_has_max_vlans(vf, vsi)) {
				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
					 vf->vf_id);
				/* There is no need to let VF know about being
				 * not trusted, so we can just return success
				 * message here as well.
				 */
				goto error_param;
			}

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't need to add it again here
			 */
			if (!vid)
				continue;

			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
			status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Enable VLAN filtering on first non-zero VLAN */
			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
				if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
			} else if (vlan_promisc) {
				status = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
		/* In case of non_trusted VF, number of VLAN elements passed
		 * to PF for removal might be greater than number of VLANs
		 * filter programmed for that VF - So, use actual number of
		 * VLANS added earlier with add VLAN opcode. In order to avoid
		 * removing VLAN that doesn't exist, which result to sending
		 * erroneous failed message back to the VF
		 */
		int num_vf_vlan;

		num_vf_vlan = vsi->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];
			struct ice_vlan vlan;

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't want a VIRTCHNL request to remove it
			 */
			if (!vid)
				continue;

			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
			status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Disable VLAN filtering when only VLAN 0 is left */
			if (!ice_vsi_has_non_zero_vlans(vsi))
				vsi->inner_vlan_ops.dis_rx_filtering(vsi);

			if (vlan_promisc)
				ice_vf_dis_vlan_promisc(vsi, &vlan);
		}
	}

error_param:
	/* send the response to the VF */
	if (add_v)
		return
ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret, 46050deb0bf7SJacob Keller NULL, 0); 46060deb0bf7SJacob Keller else 46070deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret, 46080deb0bf7SJacob Keller NULL, 0); 46090deb0bf7SJacob Keller } 46100deb0bf7SJacob Keller 46110deb0bf7SJacob Keller /** 46120deb0bf7SJacob Keller * ice_vc_add_vlan_msg 46130deb0bf7SJacob Keller * @vf: pointer to the VF info 46140deb0bf7SJacob Keller * @msg: pointer to the msg buffer 46150deb0bf7SJacob Keller * 46160deb0bf7SJacob Keller * Add and program guest VLAN ID 46170deb0bf7SJacob Keller */ 46180deb0bf7SJacob Keller static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) 46190deb0bf7SJacob Keller { 46200deb0bf7SJacob Keller return ice_vc_process_vlan_msg(vf, msg, true); 46210deb0bf7SJacob Keller } 46220deb0bf7SJacob Keller 46230deb0bf7SJacob Keller /** 46240deb0bf7SJacob Keller * ice_vc_remove_vlan_msg 46250deb0bf7SJacob Keller * @vf: pointer to the VF info 46260deb0bf7SJacob Keller * @msg: pointer to the msg buffer 46270deb0bf7SJacob Keller * 46280deb0bf7SJacob Keller * remove programmed guest VLAN ID 46290deb0bf7SJacob Keller */ 46300deb0bf7SJacob Keller static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) 46310deb0bf7SJacob Keller { 46320deb0bf7SJacob Keller return ice_vc_process_vlan_msg(vf, msg, false); 46330deb0bf7SJacob Keller } 46340deb0bf7SJacob Keller 46350deb0bf7SJacob Keller /** 46360deb0bf7SJacob Keller * ice_vc_ena_vlan_stripping 46370deb0bf7SJacob Keller * @vf: pointer to the VF info 46380deb0bf7SJacob Keller * 46390deb0bf7SJacob Keller * Enable VLAN header stripping for a given VF 46400deb0bf7SJacob Keller */ 46410deb0bf7SJacob Keller static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) 46420deb0bf7SJacob Keller { 46430deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 46440deb0bf7SJacob Keller struct ice_vsi *vsi; 46450deb0bf7SJacob Keller 46460deb0bf7SJacob Keller if 
(!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 46470deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46480deb0bf7SJacob Keller goto error_param; 46490deb0bf7SJacob Keller } 46500deb0bf7SJacob Keller 46510deb0bf7SJacob Keller if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 46520deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46530deb0bf7SJacob Keller goto error_param; 46540deb0bf7SJacob Keller } 46550deb0bf7SJacob Keller 46560deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 46570deb0bf7SJacob Keller if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) 46580deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46590deb0bf7SJacob Keller 46600deb0bf7SJacob Keller error_param: 46610deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 46620deb0bf7SJacob Keller v_ret, NULL, 0); 46630deb0bf7SJacob Keller } 46640deb0bf7SJacob Keller 46650deb0bf7SJacob Keller /** 46660deb0bf7SJacob Keller * ice_vc_dis_vlan_stripping 46670deb0bf7SJacob Keller * @vf: pointer to the VF info 46680deb0bf7SJacob Keller * 46690deb0bf7SJacob Keller * Disable VLAN header stripping for a given VF 46700deb0bf7SJacob Keller */ 46710deb0bf7SJacob Keller static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) 46720deb0bf7SJacob Keller { 46730deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 46740deb0bf7SJacob Keller struct ice_vsi *vsi; 46750deb0bf7SJacob Keller 46760deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 46770deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46780deb0bf7SJacob Keller goto error_param; 46790deb0bf7SJacob Keller } 46800deb0bf7SJacob Keller 46810deb0bf7SJacob Keller if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 46820deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46830deb0bf7SJacob Keller goto error_param; 46840deb0bf7SJacob Keller } 46850deb0bf7SJacob Keller 46860deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 46870deb0bf7SJacob Keller if (!vsi) { 
46880deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46890deb0bf7SJacob Keller goto error_param; 46900deb0bf7SJacob Keller } 46910deb0bf7SJacob Keller 46920deb0bf7SJacob Keller if (vsi->inner_vlan_ops.dis_stripping(vsi)) 46930deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 46940deb0bf7SJacob Keller 46950deb0bf7SJacob Keller error_param: 46960deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 46970deb0bf7SJacob Keller v_ret, NULL, 0); 46980deb0bf7SJacob Keller } 46990deb0bf7SJacob Keller 47000deb0bf7SJacob Keller /** 47010deb0bf7SJacob Keller * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization 47020deb0bf7SJacob Keller * @vf: VF to enable/disable VLAN stripping for on initialization 47030deb0bf7SJacob Keller * 47040deb0bf7SJacob Keller * Set the default for VLAN stripping based on whether a port VLAN is configured 47050deb0bf7SJacob Keller * and the current VLAN mode of the device. 47060deb0bf7SJacob Keller */ 47070deb0bf7SJacob Keller static int ice_vf_init_vlan_stripping(struct ice_vf *vf) 47080deb0bf7SJacob Keller { 47090deb0bf7SJacob Keller struct ice_vsi *vsi = ice_get_vf_vsi(vf); 47100deb0bf7SJacob Keller 47110deb0bf7SJacob Keller if (!vsi) 47120deb0bf7SJacob Keller return -EINVAL; 47130deb0bf7SJacob Keller 47140deb0bf7SJacob Keller /* don't modify stripping if port VLAN is configured in SVM since the 47150deb0bf7SJacob Keller * port VLAN is based on the inner/single VLAN in SVM 47160deb0bf7SJacob Keller */ 47170deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw)) 47180deb0bf7SJacob Keller return 0; 47190deb0bf7SJacob Keller 47200deb0bf7SJacob Keller if (ice_vf_vlan_offload_ena(vf->driver_caps)) 47210deb0bf7SJacob Keller return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); 47220deb0bf7SJacob Keller else 47230deb0bf7SJacob Keller return vsi->inner_vlan_ops.dis_stripping(vsi); 47240deb0bf7SJacob Keller } 47250deb0bf7SJacob Keller 
47260deb0bf7SJacob Keller static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) 47270deb0bf7SJacob Keller { 47280deb0bf7SJacob Keller if (vf->trusted) 47290deb0bf7SJacob Keller return VLAN_N_VID; 47300deb0bf7SJacob Keller else 47310deb0bf7SJacob Keller return ICE_MAX_VLAN_PER_VF; 47320deb0bf7SJacob Keller } 47330deb0bf7SJacob Keller 47340deb0bf7SJacob Keller /** 47350deb0bf7SJacob Keller * ice_vf_outer_vlan_not_allowed - check outer VLAN can be used when the device is in DVM 47360deb0bf7SJacob Keller * @vf: VF that being checked for 47370deb0bf7SJacob Keller */ 47380deb0bf7SJacob Keller static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf) 47390deb0bf7SJacob Keller { 47400deb0bf7SJacob Keller if (ice_vf_is_port_vlan_ena(vf)) 47410deb0bf7SJacob Keller return true; 47420deb0bf7SJacob Keller 47430deb0bf7SJacob Keller return false; 47440deb0bf7SJacob Keller } 47450deb0bf7SJacob Keller 47460deb0bf7SJacob Keller /** 47470deb0bf7SJacob Keller * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM 47480deb0bf7SJacob Keller * @vf: VF that capabilities are being set for 47490deb0bf7SJacob Keller * @caps: VLAN capabilities to populate 47500deb0bf7SJacob Keller * 47510deb0bf7SJacob Keller * Determine VLAN capabilities support based on whether a port VLAN is 47520deb0bf7SJacob Keller * configured. If a port VLAN is configured then the VF should use the inner 47530deb0bf7SJacob Keller * filtering/offload capabilities since the port VLAN is using the outer VLAN 47540deb0bf7SJacob Keller * capabilies. 
 */
static void
ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_outer_vlan_not_allowed(vf)) {
		/* until support for inner VLAN filtering is added when a port
		 * VLAN is configured, only support software offloaded inner
		 * VLANs when a port VLAN is configured in DVM
		 */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	} else {
		/* no port VLAN: expose full outer filtering/offload caps */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_AND;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
						 VIRTCHNL_VLAN_ETHERTYPE_9100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	}

	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
}

/**
 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF does not have any VLAN
 * filtering or offload capabilities since the port VLAN is using the inner VLAN
 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
 * VLAN filtering and offload capabilities.
 */
static void
ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* port VLAN owns the single VLAN in SVM: the VF gets nothing */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.max_filters = 0;
	} else {
		/* no port VLAN: the VF may use inner 802.1Q capabilities */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
	}
}

/**
 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
 * @vf: VF to determine VLAN capabilities for
 *
 * This will only be called if the VF and PF successfully negotiated
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
 *
 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
 * is configured or not.
 */
static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_caps *caps = NULL;
	int err, len = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		goto out;
	}
	len = sizeof(*caps);

	if (ice_is_dvm_ena(&vf->pf->hw))
		ice_vc_set_dvm_caps(vf, caps);
	else
		ice_vc_set_svm_caps(vf, caps);

	/* store negotiated caps to prevent invalid VF messages */
	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));

out:
	/* on failure caps is NULL and len is 0, so an empty reply is sent */
	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				    v_ret, (u8 *)caps, len);
	kfree(caps);
	return err;
}

/**
 * ice_vc_validate_vlan_tpid - validate VLAN TPID
 * @filtering_caps: negotiated/supported VLAN filtering capabilities
 * @tpid: VLAN TPID used for validation
 *
 * Convert the VLAN TPID to a
VIRTCHNL_VLAN_ETHERTYPE_* and then compare against 49320deb0bf7SJacob Keller * the negotiated/supported filtering caps to see if the VLAN TPID is valid. 49330deb0bf7SJacob Keller */ 49340deb0bf7SJacob Keller static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid) 49350deb0bf7SJacob Keller { 49360deb0bf7SJacob Keller enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED; 49370deb0bf7SJacob Keller 49380deb0bf7SJacob Keller switch (tpid) { 49390deb0bf7SJacob Keller case ETH_P_8021Q: 49400deb0bf7SJacob Keller vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100; 49410deb0bf7SJacob Keller break; 49420deb0bf7SJacob Keller case ETH_P_8021AD: 49430deb0bf7SJacob Keller vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8; 49440deb0bf7SJacob Keller break; 49450deb0bf7SJacob Keller case ETH_P_QINQ1: 49460deb0bf7SJacob Keller vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100; 49470deb0bf7SJacob Keller break; 49480deb0bf7SJacob Keller } 49490deb0bf7SJacob Keller 49500deb0bf7SJacob Keller if (!(filtering_caps & vlan_ethertype)) 49510deb0bf7SJacob Keller return false; 49520deb0bf7SJacob Keller 49530deb0bf7SJacob Keller return true; 49540deb0bf7SJacob Keller } 49550deb0bf7SJacob Keller 49560deb0bf7SJacob Keller /** 49570deb0bf7SJacob Keller * ice_vc_is_valid_vlan - validate the virtchnl_vlan 49580deb0bf7SJacob Keller * @vc_vlan: virtchnl_vlan to validate 49590deb0bf7SJacob Keller * 49600deb0bf7SJacob Keller * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return 49610deb0bf7SJacob Keller * false. Otherwise return true. 
 */
static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
{
	if (!vc_vlan->tci || !vc_vlan->tpid)
		return false;

	return true;
}

/**
 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
 * @vfc: negotiated/supported VLAN filtering capabilities
 * @vfl: VLAN filter list from VF to validate
 *
 * Validate all of the filters in the VLAN filter list from the VF. If any of
 * the checks fail then return false. Otherwise return true.
 */
static bool
ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
				 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	u16 i;

	if (!vfl->num_elements)
		return false;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vfc->filtering_support;
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *outer = &vlan_fltr->outer;
		struct virtchnl_vlan *inner = &vlan_fltr->inner;

		/* a populated outer/inner VLAN is only valid if the matching
		 * direction was negotiated as supported
		 */
		if ((ice_vc_is_valid_vlan(outer) &&
		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
			return false;

		/* a TCI mask is only allowed if masked filtering was negotiated */
		if ((outer->tci_mask &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
		    (inner->tci_mask &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
			return false;

		/* priority bits are only allowed if PRIO filtering was negotiated */
		if (((outer->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
		    ((inner->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
			return false;

		if ((ice_vc_is_valid_vlan(outer) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->outer, outer->tpid)) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->inner, inner->tpid)))
			return false;
	}

	return true;
}

/**
 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
 * @vc_vlan: struct virtchnl_vlan to transform
 */
static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
{
	struct ice_vlan vlan = { 0 };

	/* split the TCI into its priority and VID components */
	vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
	vlan.tpid = vc_vlan->tpid;

Keller return vlan; 50360deb0bf7SJacob Keller } 50370deb0bf7SJacob Keller 50380deb0bf7SJacob Keller /** 50390deb0bf7SJacob Keller * ice_vc_vlan_action - action to perform on the virthcnl_vlan 50400deb0bf7SJacob Keller * @vsi: VF's VSI used to perform the action 50410deb0bf7SJacob Keller * @vlan_action: function to perform the action with (i.e. add/del) 50420deb0bf7SJacob Keller * @vlan: VLAN filter to perform the action with 50430deb0bf7SJacob Keller */ 50440deb0bf7SJacob Keller static int 50450deb0bf7SJacob Keller ice_vc_vlan_action(struct ice_vsi *vsi, 50460deb0bf7SJacob Keller int (*vlan_action)(struct ice_vsi *, struct ice_vlan *), 50470deb0bf7SJacob Keller struct ice_vlan *vlan) 50480deb0bf7SJacob Keller { 50490deb0bf7SJacob Keller int err; 50500deb0bf7SJacob Keller 50510deb0bf7SJacob Keller err = vlan_action(vsi, vlan); 50520deb0bf7SJacob Keller if (err) 50530deb0bf7SJacob Keller return err; 50540deb0bf7SJacob Keller 50550deb0bf7SJacob Keller return 0; 50560deb0bf7SJacob Keller } 50570deb0bf7SJacob Keller 50580deb0bf7SJacob Keller /** 50590deb0bf7SJacob Keller * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list 50600deb0bf7SJacob Keller * @vf: VF used to delete the VLAN(s) 50610deb0bf7SJacob Keller * @vsi: VF's VSI used to delete the VLAN(s) 50620deb0bf7SJacob Keller * @vfl: virthchnl filter list used to delete the filters 50630deb0bf7SJacob Keller */ 50640deb0bf7SJacob Keller static int 50650deb0bf7SJacob Keller ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi, 50660deb0bf7SJacob Keller struct virtchnl_vlan_filter_list_v2 *vfl) 50670deb0bf7SJacob Keller { 50680deb0bf7SJacob Keller bool vlan_promisc = ice_is_vlan_promisc_allowed(vf); 50690deb0bf7SJacob Keller int err; 50700deb0bf7SJacob Keller u16 i; 50710deb0bf7SJacob Keller 50720deb0bf7SJacob Keller for (i = 0; i < vfl->num_elements; i++) { 50730deb0bf7SJacob Keller struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i]; 50740deb0bf7SJacob Keller struct virtchnl_vlan *vc_vlan; 
50750deb0bf7SJacob Keller 50760deb0bf7SJacob Keller vc_vlan = &vlan_fltr->outer; 50770deb0bf7SJacob Keller if (ice_vc_is_valid_vlan(vc_vlan)) { 50780deb0bf7SJacob Keller struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan); 50790deb0bf7SJacob Keller 50800deb0bf7SJacob Keller err = ice_vc_vlan_action(vsi, 50810deb0bf7SJacob Keller vsi->outer_vlan_ops.del_vlan, 50820deb0bf7SJacob Keller &vlan); 50830deb0bf7SJacob Keller if (err) 50840deb0bf7SJacob Keller return err; 50850deb0bf7SJacob Keller 50860deb0bf7SJacob Keller if (vlan_promisc) 50870deb0bf7SJacob Keller ice_vf_dis_vlan_promisc(vsi, &vlan); 50880deb0bf7SJacob Keller } 50890deb0bf7SJacob Keller 50900deb0bf7SJacob Keller vc_vlan = &vlan_fltr->inner; 50910deb0bf7SJacob Keller if (ice_vc_is_valid_vlan(vc_vlan)) { 50920deb0bf7SJacob Keller struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan); 50930deb0bf7SJacob Keller 50940deb0bf7SJacob Keller err = ice_vc_vlan_action(vsi, 50950deb0bf7SJacob Keller vsi->inner_vlan_ops.del_vlan, 50960deb0bf7SJacob Keller &vlan); 50970deb0bf7SJacob Keller if (err) 50980deb0bf7SJacob Keller return err; 50990deb0bf7SJacob Keller 51000deb0bf7SJacob Keller /* no support for VLAN promiscuous on inner VLAN unless 51010deb0bf7SJacob Keller * we are in Single VLAN Mode (SVM) 51020deb0bf7SJacob Keller */ 51030deb0bf7SJacob Keller if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) 51040deb0bf7SJacob Keller ice_vf_dis_vlan_promisc(vsi, &vlan); 51050deb0bf7SJacob Keller } 51060deb0bf7SJacob Keller } 51070deb0bf7SJacob Keller 51080deb0bf7SJacob Keller return 0; 51090deb0bf7SJacob Keller } 51100deb0bf7SJacob Keller 51110deb0bf7SJacob Keller /** 51120deb0bf7SJacob Keller * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2 51130deb0bf7SJacob Keller * @vf: VF the message was received from 51140deb0bf7SJacob Keller * @msg: message received from the VF 51150deb0bf7SJacob Keller */ 51160deb0bf7SJacob Keller static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg) 51170deb0bf7SJacob 
{
	struct virtchnl_vlan_filter_list_v2 *vfl =
		(struct virtchnl_vlan_filter_list_v2 *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_vsi *vsi;

	/* reject the whole request if any filter is outside the negotiated
	 * filtering capabilities
	 */
	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
					      vfl)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (ice_vc_del_vlans(vf, vsi, vfl))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
				     0);
}

/**
 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
 * @vf: VF used to add the VLAN(s)
 * @vsi: VF's VSI used to add the VLAN(s)
 * @vfl: virtchnl filter list used to add the filters
 *
 * For each filter entry, add the populated outer and/or inner VLAN via the
 * corresponding VSI VLAN ops, and enable VLAN promiscuous for the added VLAN
 * when promiscuous mode is allowed for this VF.
 *
 * Return: 0 on success, negative error code on the first failed add.
 */
static int
ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			if (vlan_promisc) {
				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (err)
					return err;
			}
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

/**
 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
 * @vsi: VF VSI used to get number of existing VLAN filters
 * @vfc: negotiated/supported VLAN filtering capabilities
 * @vfl: VLAN filter list from VF to validate
 *
 * Validate all of the filters in the VLAN filter list from the VF during the
 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
 * Otherwise return true.
 *
 * Return: true when the request stays within the negotiated maximum filter
 * count and passes ice_vc_validate_vlan_filter_list(), false otherwise.
 */
static bool
ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
				     struct virtchnl_vlan_filtering_caps *vfc,
				     struct virtchnl_vlan_filter_list_v2 *vfl)
{
	/* existing filters plus the requested ones must fit the cap */
	u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;

	if (num_requested_filters > vfc->max_filters)
		return false;

	return ice_vc_validate_vlan_filter_list(vfc, vfl);
}

/**
 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * Return: the result of sending the VIRTCHNL_OP_ADD_VLAN_V2 response to the
 * VF.
 */
static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum
virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list_v2 *vfl =
		(struct virtchnl_vlan_filter_list_v2 *)msg;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_validate_add_vlan_filter_list(vsi,
						  &vf->vlan_v2_caps.filtering,
						  vfl)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (ice_vc_add_vlans(vf, vsi, vfl))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
				     0);
}

/**
 * ice_vc_valid_vlan_setting - validate VLAN setting
 * @negotiated_settings: negotiated VLAN settings during VF init
 * @ethertype_setting: ethertype(s) requested for the VLAN setting
 *
 * Return: true when the requested ethertype setting is a subset of what was
 * negotiated and toggling is allowed, false otherwise.
 */
static bool
ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
{
	/* every requested ethertype bit must have been negotiated */
	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
		return false;

	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
	 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
	 */
	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
	    hweight32(ethertype_setting) > 1)
		return false;

	/* ability to modify the VLAN setting was not negotiated */
	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
		return false;

	return true;
}

/**
 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
 * @caps: negotiated VLAN settings during VF init
 * @msg: message to validate
 *
 * Used to validate any VLAN virtchnl message sent as a
 * virtchnl_vlan_setting structure. Validates the message against the
 * negotiated/supported caps during VF driver init.
 *
 * Return: true when the message requests at least one setting and every
 * requested outer/inner setting is valid per the negotiated caps, false
 * otherwise.
 */
static bool
ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
			      struct virtchnl_vlan_setting *msg)
{
	/* reject a message that requests nothing or targets a VF with no
	 * negotiated outer/inner capabilities
	 */
	if ((!msg->outer_ethertype_setting &&
	     !msg->inner_ethertype_setting) ||
	    (!caps->outer && !caps->inner))
		return false;

	if (msg->outer_ethertype_setting &&
	    !ice_vc_valid_vlan_setting(caps->outer,
				       msg->outer_ethertype_setting))
		return false;

	if (msg->inner_ethertype_setting &&
	    !ice_vc_valid_vlan_setting(caps->inner,
				       msg->inner_ethertype_setting))
		return false;

	return true;
}

/**
 * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
 * @tpid: VLAN TPID to populate
 *
 * Return: 0 on success, -EINVAL (with *tpid set to 0) when the setting is not
 * exactly one of the supported single-ethertype values.
 */
static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
{
	switch (ethertype_setting) {
	case VIRTCHNL_VLAN_ETHERTYPE_8100:
		*tpid = ETH_P_8021Q;
		break;
	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
		*tpid = ETH_P_8021AD;
		break;
	case VIRTCHNL_VLAN_ETHERTYPE_9100:
		*tpid = ETH_P_QINQ1;
		break;
	default:
		*tpid = 0;
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
 * @vsi: VF's VSI used to enable the VLAN offload
 * @ena_offload: function used to enable the VLAN offload
 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
 *
 * Return: 0 on success, negative error code when the ethertype is invalid or
 * the offload callback fails.
 */
static int
ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
			u32 ethertype_setting)
{
	u16 tpid;
	int err;

	err = ice_vc_get_tpid(ethertype_setting, &tpid);
	if (err)
		return err;

	err = ena_offload(vsi, tpid);
	if (err)
		return err;

	return 0;
}

/* Rx queue context register and bit used to select which descriptor field
 * the first stripped VLAN tag lands in
 */
#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX	3
#define ICE_L2TSEL_BIT_OFFSET		23
enum ice_l2tsel {
	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
};

/**
 * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
 * @vsi: VSI used to
update l2tsel on
 * @l2tsel: l2tsel setting requested
 *
 * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
 * This will modify which descriptor field the first offloaded VLAN will be
 * stripped into.
 */
static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
{
	struct ice_hw *hw = &vsi->back->hw;
	u32 l2tsel_bit;
	int i;

	/* bit clear selects L2TAG2_2ND, bit set selects L2TAG1 */
	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
		l2tsel_bit = 0;
	else
		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);

	/* read-modify-write the l2tsel bit in every Rx queue context */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		u16 pfq = vsi->rxq_map[i];
		u32 qrx_context_offset;
		u32 regval;

		qrx_context_offset =
			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);

		regval = rd32(hw, qrx_context_offset);
		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
		regval |= l2tsel_bit;
		wr32(hw, qrx_context_offset, regval);
	}
}

/**
 * ice_vc_ena_vlan_stripping_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
 *
 * Return: the result of sending the response message to the VF.
 */
static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *stripping_support;
	struct virtchnl_vlan_setting *strip_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = strip_msg->outer_ethertype_setting;
	if (ethertype_setting) {
		if (ice_vc_ena_vlan_offload(vsi,
					    vsi->outer_vlan_ops.ena_stripping,
					    ethertype_setting)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto out;
		} else {
			enum ice_l2tsel l2tsel =
				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;

			/* PF tells the VF that the outer VLAN tag is always
			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
			 * inner is always extracted to
			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
			 * support outer stripping so the first tag always ends
			 * up in L2TAG2_2ND and the second/inner tag, if
			 * enabled, is extracted in L2TAG1.
			 */
			ice_vsi_update_l2tsel(vsi, l2tsel);
		}
	}

	ethertype_setting = strip_msg->inner_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_stripping_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
 *
 * Return: the result of sending the response message to the VF.
 */
static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *stripping_support;
	struct virtchnl_vlan_setting *strip_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = strip_msg->outer_ethertype_setting;
	if (ethertype_setting) {
		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto out;
		} else {
			enum ice_l2tsel l2tsel =
				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;

			/* PF tells the VF that the outer VLAN tag is always
			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
			 * inner is always extracted to
			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
			 * support inner stripping while outer stripping is
			 * disabled so that the first and only tag is extracted
			 * in L2TAG1.
			 */
			ice_vsi_update_l2tsel(vsi, l2tsel);
		}
	}

	ethertype_setting = strip_msg->inner_ethertype_setting;
	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0);
}

/**
 * ice_vc_ena_vlan_insertion_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
 *
 * Return: the result of sending the response message to the VF.
 */
static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *insertion_support;
	struct virtchnl_vlan_setting *insertion_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi
*vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->outer_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->inner_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
}

/**
 * ice_vc_dis_vlan_insertion_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
 *
 * Return: the result of sending the response message to the VF.
 */
static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *insertion_support;
	struct virtchnl_vlan_setting *insertion_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->outer_ethertype_setting;
	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = insertion_msg->inner_ethertype_setting;
	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, v_ret, NULL, 0);
}

/* default dispatch table mapping virtchnl opcodes to their handlers */
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
	.get_ver_msg = ice_vc_get_ver_msg,
	.get_vf_res_msg = ice_vc_get_vf_res_msg,
	.reset_vf = ice_vc_reset_vf_msg,
	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
	.cfg_qs_msg = ice_vc_cfg_qs_msg,
	.ena_qs_msg = ice_vc_ena_qs_msg,
	.dis_qs_msg = ice_vc_dis_qs_msg,
	.request_qs_msg = ice_vc_request_qs_msg,
	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
	.config_rss_key = ice_vc_config_rss_key,
	.config_rss_lut = ice_vc_config_rss_lut,
	.get_stats_msg = ice_vc_get_stats_msg,
	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
	.add_vlan_msg = ice_vc_add_vlan_msg,
	.remove_vlan_msg = ice_vc_remove_vlan_msg,
	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
};

/**
 * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
 * @vf: the VF to switch ops
 */
void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
{
	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
}

/**
 * ice_vc_repr_add_mac
 * @vf: pointer to VF
 * @msg: virtchannel message
 *
 * When port representors are created, we do not add MAC rule
 * to firmware, we store it so that PF could report same
 * MAC as VF.
57230deb0bf7SJacob Keller */ 57240deb0bf7SJacob Keller static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) 57250deb0bf7SJacob Keller { 57260deb0bf7SJacob Keller enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 57270deb0bf7SJacob Keller struct virtchnl_ether_addr_list *al = 57280deb0bf7SJacob Keller (struct virtchnl_ether_addr_list *)msg; 57290deb0bf7SJacob Keller struct ice_vsi *vsi; 57300deb0bf7SJacob Keller struct ice_pf *pf; 57310deb0bf7SJacob Keller int i; 57320deb0bf7SJacob Keller 57330deb0bf7SJacob Keller if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 57340deb0bf7SJacob Keller !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { 57350deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 57360deb0bf7SJacob Keller goto handle_mac_exit; 57370deb0bf7SJacob Keller } 57380deb0bf7SJacob Keller 57390deb0bf7SJacob Keller pf = vf->pf; 57400deb0bf7SJacob Keller 57410deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 57420deb0bf7SJacob Keller if (!vsi) { 57430deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_PARAM; 57440deb0bf7SJacob Keller goto handle_mac_exit; 57450deb0bf7SJacob Keller } 57460deb0bf7SJacob Keller 57470deb0bf7SJacob Keller for (i = 0; i < al->num_elements; i++) { 57480deb0bf7SJacob Keller u8 *mac_addr = al->list[i].addr; 57490deb0bf7SJacob Keller int result; 57500deb0bf7SJacob Keller 57510deb0bf7SJacob Keller if (!is_unicast_ether_addr(mac_addr) || 57520deb0bf7SJacob Keller ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) 57530deb0bf7SJacob Keller continue; 57540deb0bf7SJacob Keller 57550deb0bf7SJacob Keller if (vf->pf_set_mac) { 57560deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); 57570deb0bf7SJacob Keller v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 57580deb0bf7SJacob Keller goto handle_mac_exit; 57590deb0bf7SJacob Keller } 57600deb0bf7SJacob Keller 57610deb0bf7SJacob Keller result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr); 57620deb0bf7SJacob Keller if (result) { 
57630deb0bf7SJacob Keller dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n", 57640deb0bf7SJacob Keller mac_addr, vf->vf_id, result); 57650deb0bf7SJacob Keller goto handle_mac_exit; 57660deb0bf7SJacob Keller } 57670deb0bf7SJacob Keller 57680deb0bf7SJacob Keller ice_vfhw_mac_add(vf, &al->list[i]); 57690deb0bf7SJacob Keller vf->num_mac++; 57700deb0bf7SJacob Keller break; 57710deb0bf7SJacob Keller } 57720deb0bf7SJacob Keller 57730deb0bf7SJacob Keller handle_mac_exit: 57740deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 57750deb0bf7SJacob Keller v_ret, NULL, 0); 57760deb0bf7SJacob Keller } 57770deb0bf7SJacob Keller 57780deb0bf7SJacob Keller /** 57790deb0bf7SJacob Keller * ice_vc_repr_del_mac - response with success for deleting MAC 57800deb0bf7SJacob Keller * @vf: pointer to VF 57810deb0bf7SJacob Keller * @msg: virtchannel message 57820deb0bf7SJacob Keller * 57830deb0bf7SJacob Keller * Respond with success to not break normal VF flow. 57840deb0bf7SJacob Keller * For legacy VF driver try to update cached MAC address. 
57850deb0bf7SJacob Keller */ 57860deb0bf7SJacob Keller static int 57870deb0bf7SJacob Keller ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg) 57880deb0bf7SJacob Keller { 57890deb0bf7SJacob Keller struct virtchnl_ether_addr_list *al = 57900deb0bf7SJacob Keller (struct virtchnl_ether_addr_list *)msg; 57910deb0bf7SJacob Keller 57920deb0bf7SJacob Keller ice_update_legacy_cached_mac(vf, &al->list[0]); 57930deb0bf7SJacob Keller 57940deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 57950deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, NULL, 0); 57960deb0bf7SJacob Keller } 57970deb0bf7SJacob Keller 57980deb0bf7SJacob Keller static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg) 57990deb0bf7SJacob Keller { 58000deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 58010deb0bf7SJacob Keller "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id); 58020deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, 58030deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, NULL, 0); 58040deb0bf7SJacob Keller } 58050deb0bf7SJacob Keller 58060deb0bf7SJacob Keller static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg) 58070deb0bf7SJacob Keller { 58080deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 58090deb0bf7SJacob Keller "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id); 58100deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, 58110deb0bf7SJacob Keller VIRTCHNL_STATUS_SUCCESS, NULL, 0); 58120deb0bf7SJacob Keller } 58130deb0bf7SJacob Keller 58140deb0bf7SJacob Keller static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf) 58150deb0bf7SJacob Keller { 58160deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 58170deb0bf7SJacob Keller "Can't enable VLAN stripping in switchdev mode for VF %d\n", 58180deb0bf7SJacob Keller vf->vf_id); 58190deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 
58200deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 58210deb0bf7SJacob Keller NULL, 0); 58220deb0bf7SJacob Keller } 58230deb0bf7SJacob Keller 58240deb0bf7SJacob Keller static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf) 58250deb0bf7SJacob Keller { 58260deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 58270deb0bf7SJacob Keller "Can't disable VLAN stripping in switchdev mode for VF %d\n", 58280deb0bf7SJacob Keller vf->vf_id); 58290deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 58300deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 58310deb0bf7SJacob Keller NULL, 0); 58320deb0bf7SJacob Keller } 58330deb0bf7SJacob Keller 58340deb0bf7SJacob Keller static int 58350deb0bf7SJacob Keller ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg) 58360deb0bf7SJacob Keller { 58370deb0bf7SJacob Keller dev_dbg(ice_pf_to_dev(vf->pf), 58380deb0bf7SJacob Keller "Can't config promiscuous mode in switchdev mode for VF %d\n", 58390deb0bf7SJacob Keller vf->vf_id); 58400deb0bf7SJacob Keller return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 58410deb0bf7SJacob Keller VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 58420deb0bf7SJacob Keller NULL, 0); 58430deb0bf7SJacob Keller } 58440deb0bf7SJacob Keller 5845*a7e11710SJacob Keller static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { 5846*a7e11710SJacob Keller .get_ver_msg = ice_vc_get_ver_msg, 5847*a7e11710SJacob Keller .get_vf_res_msg = ice_vc_get_vf_res_msg, 5848*a7e11710SJacob Keller .reset_vf = ice_vc_reset_vf_msg, 5849*a7e11710SJacob Keller .add_mac_addr_msg = ice_vc_repr_add_mac, 5850*a7e11710SJacob Keller .del_mac_addr_msg = ice_vc_repr_del_mac, 5851*a7e11710SJacob Keller .cfg_qs_msg = ice_vc_cfg_qs_msg, 5852*a7e11710SJacob Keller .ena_qs_msg = ice_vc_ena_qs_msg, 5853*a7e11710SJacob Keller .dis_qs_msg = ice_vc_dis_qs_msg, 5854*a7e11710SJacob Keller .request_qs_msg = ice_vc_request_qs_msg, 5855*a7e11710SJacob Keller 
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, 5856*a7e11710SJacob Keller .config_rss_key = ice_vc_config_rss_key, 5857*a7e11710SJacob Keller .config_rss_lut = ice_vc_config_rss_lut, 5858*a7e11710SJacob Keller .get_stats_msg = ice_vc_get_stats_msg, 5859*a7e11710SJacob Keller .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, 5860*a7e11710SJacob Keller .add_vlan_msg = ice_vc_repr_add_vlan, 5861*a7e11710SJacob Keller .remove_vlan_msg = ice_vc_repr_del_vlan, 5862*a7e11710SJacob Keller .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping, 5863*a7e11710SJacob Keller .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping, 5864*a7e11710SJacob Keller .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, 5865*a7e11710SJacob Keller .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, 5866*a7e11710SJacob Keller .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, 5867*a7e11710SJacob Keller .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps, 5868*a7e11710SJacob Keller .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg, 5869*a7e11710SJacob Keller .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg, 5870*a7e11710SJacob Keller .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg, 5871*a7e11710SJacob Keller .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, 5872*a7e11710SJacob Keller .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, 5873*a7e11710SJacob Keller .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, 5874*a7e11710SJacob Keller }; 5875*a7e11710SJacob Keller 5876*a7e11710SJacob Keller /** 5877*a7e11710SJacob Keller * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops 5878*a7e11710SJacob Keller * @vf: the VF to switch ops 5879*a7e11710SJacob Keller */ 5880*a7e11710SJacob Keller void ice_virtchnl_set_repr_ops(struct ice_vf *vf) 58810deb0bf7SJacob Keller { 5882*a7e11710SJacob Keller vf->virtchnl_ops = &ice_virtchnl_repr_ops; 58830deb0bf7SJacob Keller } 58840deb0bf7SJacob Keller 58850deb0bf7SJacob Keller /** 58860deb0bf7SJacob Keller * 
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 *
 * called from the common asq/arq handler to
 * process request from VF
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	const struct ice_virtchnl_ops *ops;
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);

	/* takes a reference on the VF; every exit path below must ice_put_vf() */
	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf) {
		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
			vf_id, v_opcode, msglen);
		return;
	}

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
		err = -EPERM;
		goto error_handler;
	}

	/* ops table is per-VF: default or representor, see set_*_ops above */
	ops = vf->virtchnl_ops;

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
	}

	/* opcode-allowed check takes precedence over a validation failure:
	 * a disallowed opcode is reported as NOT_SUPPORTED, not ERR_PARAM
	 */
	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
		ice_vc_send_msg_to_vf(vf, v_opcode,
				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
				      0);
		ice_put_vf(vf);
		return;
	}

error_handler:
	if (err) {
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		ice_put_vf(vf);
		return;
	}

	/* VF is being configured in another context that triggers a VFR, so no
	 * need to process this message
	 */
	if (!mutex_trylock(&vf->cfg_lock)) {
		dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
			 vf->vf_id);
		ice_put_vf(vf);
		return;
	}

	/* dispatch to the per-VF ops table; handlers send their own replies */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		err = ops->get_ver_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ops->get_vf_res_msg(vf, msg);
		if (ice_vf_init_vlan_stripping(vf))
			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
				vf->vf_id);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ops->reset_vf(vf);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ops->add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ops->del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ops->cfg_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ops->ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ops->dis_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ops->request_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ops->cfg_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ops->config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ops->config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		err = ops->get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		err = ops->cfg_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		err = ops->add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		err = ops->remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ops->ena_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ops->dis_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		err = ops->add_fdir_fltr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		err = ops->del_fdir_fltr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
		err = ops->handle_rss_cfg_msg(vf, msg, true);
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG:
		err = ops->handle_rss_cfg_msg(vf, msg, false);
		break;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		err = ops->get_offload_vlan_v2_caps(vf);
		break;
	case VIRTCHNL_OP_ADD_VLAN_V2:
		err = ops->add_vlan_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN_V2:
		err = ops->remove_vlan_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
			vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
	if (err) {
		/* Helper function cares less about error return values here
		 * as it is busy with pending work.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}

	mutex_unlock(&vf->cfg_lock);
	ice_put_vf(vf);
}

/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 *
 * Return: 0 on success, -EINVAL if the VF cannot be found, or the error from
 * ice_check_vf_ready_for_cfg().
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	/* report forced link state only when the admin has overridden it */
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
 * @pf: PF used to reference the switch's rules
 * @umac: unicast MAC to compare against existing switch rules
 *
 * Return true on the first/any match, else return false
 */
static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
{
	struct ice_sw_recipe *mac_recipe_list =
		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* protect MAC filter list access */

	rule_head = &mac_recipe_list->filt_rules;
	rule_lock = &mac_recipe_list->filt_rule_lock;

	mutex_lock(rule_lock);
	list_for_each_entry(list_itr, rule_head, list_entry) {
		u8 *existing_mac =
			&list_itr->fltr_info.l_data.mac.mac_addr[0];

		if (ether_addr_equal(existing_mac, umac)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}

	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 *
 * Return: 0 on success (including the no-op case where the MAC is already
 * set), -EINVAL on invalid input or duplicate MAC, or the error from
 * ice_check_vf_ready_for_cfg().
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_unicast_mac_exists(pf, mac)) {
		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
			   mac, vf_id, mac);
		ret = -EINVAL;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr.addr, mac);
	ether_addr_copy(vf->hw_lan_addr.addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_vc_reset_vf(vf);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 *
 * Return: 0 on success, -EOPNOTSUPP in switchdev mode, -EINVAL if the VF
 * cannot be found, or the error from ice_check_vf_ready_for_cfg().
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}
	mutex_lock(&vf->cfg_lock);

	/* trust change takes effect after a VF reset */
	vf->trusted = trusted;
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 *
 * Return: 0 on success, -EINVAL on bad VF ID or unknown @link_state, or the
 * error from ice_check_vf_ready_for_cfg().
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 *
 * Return: the sum of min_tx_rate (Mbps) over all VFs of @pf.
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of total
 * min_tx_rate based on the current link speed and all other VFs configured
 * min_tx_rate
63400deb0bf7SJacob Keller * 63410deb0bf7SJacob Keller * Return true if the passed min_tx_rate would cause oversubscription, else 63420deb0bf7SJacob Keller * return false 63430deb0bf7SJacob Keller */ 63440deb0bf7SJacob Keller static bool 63450deb0bf7SJacob Keller ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) 63460deb0bf7SJacob Keller { 63470deb0bf7SJacob Keller int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf)); 63480deb0bf7SJacob Keller int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); 63490deb0bf7SJacob Keller 63500deb0bf7SJacob Keller /* this VF's previous rate is being overwritten */ 63510deb0bf7SJacob Keller all_vfs_min_tx_rate -= vf->min_tx_rate; 63520deb0bf7SJacob Keller 63530deb0bf7SJacob Keller if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) { 63540deb0bf7SJacob Keller dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n", 63550deb0bf7SJacob Keller min_tx_rate, vf->vf_id, 63560deb0bf7SJacob Keller all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps, 63570deb0bf7SJacob Keller link_speed_mbps); 63580deb0bf7SJacob Keller return true; 63590deb0bf7SJacob Keller } 63600deb0bf7SJacob Keller 63610deb0bf7SJacob Keller return false; 63620deb0bf7SJacob Keller } 63630deb0bf7SJacob Keller 63640deb0bf7SJacob Keller /** 63650deb0bf7SJacob Keller * ice_set_vf_bw - set min/max VF bandwidth 63660deb0bf7SJacob Keller * @netdev: network interface device structure 63670deb0bf7SJacob Keller * @vf_id: VF identifier 63680deb0bf7SJacob Keller * @min_tx_rate: Minimum Tx rate in Mbps 63690deb0bf7SJacob Keller * @max_tx_rate: Maximum Tx rate in Mbps 63700deb0bf7SJacob Keller */ 63710deb0bf7SJacob Keller int 63720deb0bf7SJacob Keller ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, 63730deb0bf7SJacob Keller int max_tx_rate) 63740deb0bf7SJacob Keller { 63750deb0bf7SJacob Keller struct ice_pf *pf = 
ice_netdev_to_pf(netdev); 63760deb0bf7SJacob Keller struct ice_vsi *vsi; 63770deb0bf7SJacob Keller struct device *dev; 63780deb0bf7SJacob Keller struct ice_vf *vf; 63790deb0bf7SJacob Keller int ret; 63800deb0bf7SJacob Keller 63810deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 63820deb0bf7SJacob Keller 63830deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 63840deb0bf7SJacob Keller if (!vf) 63850deb0bf7SJacob Keller return -EINVAL; 63860deb0bf7SJacob Keller 63870deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 63880deb0bf7SJacob Keller if (ret) 63890deb0bf7SJacob Keller goto out_put_vf; 63900deb0bf7SJacob Keller 63910deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 63920deb0bf7SJacob Keller 63930deb0bf7SJacob Keller /* when max_tx_rate is zero that means no max Tx rate limiting, so only 63940deb0bf7SJacob Keller * check if max_tx_rate is non-zero 63950deb0bf7SJacob Keller */ 63960deb0bf7SJacob Keller if (max_tx_rate && min_tx_rate > max_tx_rate) { 63970deb0bf7SJacob Keller dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n", 63980deb0bf7SJacob Keller min_tx_rate, max_tx_rate); 63990deb0bf7SJacob Keller ret = -EINVAL; 64000deb0bf7SJacob Keller goto out_put_vf; 64010deb0bf7SJacob Keller } 64020deb0bf7SJacob Keller 64030deb0bf7SJacob Keller if (min_tx_rate && ice_is_dcb_active(pf)) { 64040deb0bf7SJacob Keller dev_err(dev, "DCB on PF is currently enabled. 
VF min Tx rate limiting not allowed on this PF.\n"); 64050deb0bf7SJacob Keller ret = -EOPNOTSUPP; 64060deb0bf7SJacob Keller goto out_put_vf; 64070deb0bf7SJacob Keller } 64080deb0bf7SJacob Keller 64090deb0bf7SJacob Keller if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) { 64100deb0bf7SJacob Keller ret = -EINVAL; 64110deb0bf7SJacob Keller goto out_put_vf; 64120deb0bf7SJacob Keller } 64130deb0bf7SJacob Keller 64140deb0bf7SJacob Keller if (vf->min_tx_rate != (unsigned int)min_tx_rate) { 64150deb0bf7SJacob Keller ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000); 64160deb0bf7SJacob Keller if (ret) { 64170deb0bf7SJacob Keller dev_err(dev, "Unable to set min-tx-rate for VF %d\n", 64180deb0bf7SJacob Keller vf->vf_id); 64190deb0bf7SJacob Keller goto out_put_vf; 64200deb0bf7SJacob Keller } 64210deb0bf7SJacob Keller 64220deb0bf7SJacob Keller vf->min_tx_rate = min_tx_rate; 64230deb0bf7SJacob Keller } 64240deb0bf7SJacob Keller 64250deb0bf7SJacob Keller if (vf->max_tx_rate != (unsigned int)max_tx_rate) { 64260deb0bf7SJacob Keller ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000); 64270deb0bf7SJacob Keller if (ret) { 64280deb0bf7SJacob Keller dev_err(dev, "Unable to set max-tx-rate for VF %d\n", 64290deb0bf7SJacob Keller vf->vf_id); 64300deb0bf7SJacob Keller goto out_put_vf; 64310deb0bf7SJacob Keller } 64320deb0bf7SJacob Keller 64330deb0bf7SJacob Keller vf->max_tx_rate = max_tx_rate; 64340deb0bf7SJacob Keller } 64350deb0bf7SJacob Keller 64360deb0bf7SJacob Keller out_put_vf: 64370deb0bf7SJacob Keller ice_put_vf(vf); 64380deb0bf7SJacob Keller return ret; 64390deb0bf7SJacob Keller } 64400deb0bf7SJacob Keller 64410deb0bf7SJacob Keller /** 64420deb0bf7SJacob Keller * ice_get_vf_stats - populate some stats for the VF 64430deb0bf7SJacob Keller * @netdev: the netdev of the PF 64440deb0bf7SJacob Keller * @vf_id: the host OS identifier (0-255) 64450deb0bf7SJacob Keller * @vf_stats: pointer to the OS memory to be initialized 64460deb0bf7SJacob Keller */ 64470deb0bf7SJacob 
Keller int ice_get_vf_stats(struct net_device *netdev, int vf_id, 64480deb0bf7SJacob Keller struct ifla_vf_stats *vf_stats) 64490deb0bf7SJacob Keller { 64500deb0bf7SJacob Keller struct ice_pf *pf = ice_netdev_to_pf(netdev); 64510deb0bf7SJacob Keller struct ice_eth_stats *stats; 64520deb0bf7SJacob Keller struct ice_vsi *vsi; 64530deb0bf7SJacob Keller struct ice_vf *vf; 64540deb0bf7SJacob Keller int ret; 64550deb0bf7SJacob Keller 64560deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 64570deb0bf7SJacob Keller if (!vf) 64580deb0bf7SJacob Keller return -EINVAL; 64590deb0bf7SJacob Keller 64600deb0bf7SJacob Keller ret = ice_check_vf_ready_for_cfg(vf); 64610deb0bf7SJacob Keller if (ret) 64620deb0bf7SJacob Keller goto out_put_vf; 64630deb0bf7SJacob Keller 64640deb0bf7SJacob Keller vsi = ice_get_vf_vsi(vf); 64650deb0bf7SJacob Keller if (!vsi) { 64660deb0bf7SJacob Keller ret = -EINVAL; 64670deb0bf7SJacob Keller goto out_put_vf; 64680deb0bf7SJacob Keller } 64690deb0bf7SJacob Keller 64700deb0bf7SJacob Keller ice_update_eth_stats(vsi); 64710deb0bf7SJacob Keller stats = &vsi->eth_stats; 64720deb0bf7SJacob Keller 64730deb0bf7SJacob Keller memset(vf_stats, 0, sizeof(*vf_stats)); 64740deb0bf7SJacob Keller 64750deb0bf7SJacob Keller vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + 64760deb0bf7SJacob Keller stats->rx_multicast; 64770deb0bf7SJacob Keller vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + 64780deb0bf7SJacob Keller stats->tx_multicast; 64790deb0bf7SJacob Keller vf_stats->rx_bytes = stats->rx_bytes; 64800deb0bf7SJacob Keller vf_stats->tx_bytes = stats->tx_bytes; 64810deb0bf7SJacob Keller vf_stats->broadcast = stats->rx_broadcast; 64820deb0bf7SJacob Keller vf_stats->multicast = stats->rx_multicast; 64830deb0bf7SJacob Keller vf_stats->rx_dropped = stats->rx_discards; 64840deb0bf7SJacob Keller vf_stats->tx_dropped = stats->tx_discards; 64850deb0bf7SJacob Keller 64860deb0bf7SJacob Keller out_put_vf: 64870deb0bf7SJacob Keller ice_put_vf(vf); 
64880deb0bf7SJacob Keller return ret; 64890deb0bf7SJacob Keller } 64900deb0bf7SJacob Keller 64910deb0bf7SJacob Keller /** 64920deb0bf7SJacob Keller * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event 64930deb0bf7SJacob Keller * @vf: pointer to the VF structure 64940deb0bf7SJacob Keller */ 64950deb0bf7SJacob Keller void ice_print_vf_rx_mdd_event(struct ice_vf *vf) 64960deb0bf7SJacob Keller { 64970deb0bf7SJacob Keller struct ice_pf *pf = vf->pf; 64980deb0bf7SJacob Keller struct device *dev; 64990deb0bf7SJacob Keller 65000deb0bf7SJacob Keller dev = ice_pf_to_dev(pf); 65010deb0bf7SJacob Keller 65020deb0bf7SJacob Keller dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", 65030deb0bf7SJacob Keller vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, 65040deb0bf7SJacob Keller vf->dev_lan_addr.addr, 65050deb0bf7SJacob Keller test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) 65060deb0bf7SJacob Keller ? "on" : "off"); 65070deb0bf7SJacob Keller } 65080deb0bf7SJacob Keller 65090deb0bf7SJacob Keller /** 65100deb0bf7SJacob Keller * ice_print_vfs_mdd_events - print VFs malicious driver detect event 65110deb0bf7SJacob Keller * @pf: pointer to the PF structure 65120deb0bf7SJacob Keller * 65130deb0bf7SJacob Keller * Called from ice_handle_mdd_event to rate limit and print VFs MDD events. 
65140deb0bf7SJacob Keller */ 65150deb0bf7SJacob Keller void ice_print_vfs_mdd_events(struct ice_pf *pf) 65160deb0bf7SJacob Keller { 65170deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 65180deb0bf7SJacob Keller struct ice_hw *hw = &pf->hw; 65190deb0bf7SJacob Keller struct ice_vf *vf; 65200deb0bf7SJacob Keller unsigned int bkt; 65210deb0bf7SJacob Keller 65220deb0bf7SJacob Keller /* check that there are pending MDD events to print */ 65230deb0bf7SJacob Keller if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) 65240deb0bf7SJacob Keller return; 65250deb0bf7SJacob Keller 65260deb0bf7SJacob Keller /* VF MDD event logs are rate limited to one second intervals */ 65270deb0bf7SJacob Keller if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1)) 65280deb0bf7SJacob Keller return; 65290deb0bf7SJacob Keller 65300deb0bf7SJacob Keller pf->vfs.last_printed_mdd_jiffies = jiffies; 65310deb0bf7SJacob Keller 65320deb0bf7SJacob Keller mutex_lock(&pf->vfs.table_lock); 65330deb0bf7SJacob Keller ice_for_each_vf(pf, bkt, vf) { 65340deb0bf7SJacob Keller /* only print Rx MDD event message if there are new events */ 65350deb0bf7SJacob Keller if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) { 65360deb0bf7SJacob Keller vf->mdd_rx_events.last_printed = 65370deb0bf7SJacob Keller vf->mdd_rx_events.count; 65380deb0bf7SJacob Keller ice_print_vf_rx_mdd_event(vf); 65390deb0bf7SJacob Keller } 65400deb0bf7SJacob Keller 65410deb0bf7SJacob Keller /* only print Tx MDD event message if there are new events */ 65420deb0bf7SJacob Keller if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { 65430deb0bf7SJacob Keller vf->mdd_tx_events.last_printed = 65440deb0bf7SJacob Keller vf->mdd_tx_events.count; 65450deb0bf7SJacob Keller 65460deb0bf7SJacob Keller dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", 65470deb0bf7SJacob Keller vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, 65480deb0bf7SJacob Keller 
vf->dev_lan_addr.addr); 65490deb0bf7SJacob Keller } 65500deb0bf7SJacob Keller } 65510deb0bf7SJacob Keller mutex_unlock(&pf->vfs.table_lock); 65520deb0bf7SJacob Keller } 65530deb0bf7SJacob Keller 65540deb0bf7SJacob Keller /** 65550deb0bf7SJacob Keller * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR 65560deb0bf7SJacob Keller * @pdev: pointer to a pci_dev structure 65570deb0bf7SJacob Keller * 65580deb0bf7SJacob Keller * Called when recovering from a PF FLR to restore interrupt capability to 65590deb0bf7SJacob Keller * the VFs. 65600deb0bf7SJacob Keller */ 65610deb0bf7SJacob Keller void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) 65620deb0bf7SJacob Keller { 65630deb0bf7SJacob Keller u16 vf_id; 65640deb0bf7SJacob Keller int pos; 65650deb0bf7SJacob Keller 65660deb0bf7SJacob Keller if (!pci_num_vf(pdev)) 65670deb0bf7SJacob Keller return; 65680deb0bf7SJacob Keller 65690deb0bf7SJacob Keller pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 65700deb0bf7SJacob Keller if (pos) { 65710deb0bf7SJacob Keller struct pci_dev *vfdev; 65720deb0bf7SJacob Keller 65730deb0bf7SJacob Keller pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, 65740deb0bf7SJacob Keller &vf_id); 65750deb0bf7SJacob Keller vfdev = pci_get_device(pdev->vendor, vf_id, NULL); 65760deb0bf7SJacob Keller while (vfdev) { 65770deb0bf7SJacob Keller if (vfdev->is_virtfn && vfdev->physfn == pdev) 65780deb0bf7SJacob Keller pci_restore_msi_state(vfdev); 65790deb0bf7SJacob Keller vfdev = pci_get_device(pdev->vendor, vf_id, 65800deb0bf7SJacob Keller vfdev); 65810deb0bf7SJacob Keller } 65820deb0bf7SJacob Keller } 65830deb0bf7SJacob Keller } 65840deb0bf7SJacob Keller 65850deb0bf7SJacob Keller /** 65860deb0bf7SJacob Keller * ice_is_malicious_vf - helper function to detect a malicious VF 65870deb0bf7SJacob Keller * @pf: ptr to struct ice_pf 65880deb0bf7SJacob Keller * @event: pointer to the AQ event 65890deb0bf7SJacob Keller * @num_msg_proc: the number of messages processed so far 
65900deb0bf7SJacob Keller * @num_msg_pending: the number of messages peinding in admin queue 65910deb0bf7SJacob Keller */ 65920deb0bf7SJacob Keller bool 65930deb0bf7SJacob Keller ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, 65940deb0bf7SJacob Keller u16 num_msg_proc, u16 num_msg_pending) 65950deb0bf7SJacob Keller { 65960deb0bf7SJacob Keller s16 vf_id = le16_to_cpu(event->desc.retval); 65970deb0bf7SJacob Keller struct device *dev = ice_pf_to_dev(pf); 65980deb0bf7SJacob Keller struct ice_mbx_data mbxdata; 65990deb0bf7SJacob Keller bool malvf = false; 66000deb0bf7SJacob Keller struct ice_vf *vf; 66010deb0bf7SJacob Keller int status; 66020deb0bf7SJacob Keller 66030deb0bf7SJacob Keller vf = ice_get_vf_by_id(pf, vf_id); 66040deb0bf7SJacob Keller if (!vf) 66050deb0bf7SJacob Keller return false; 66060deb0bf7SJacob Keller 66070deb0bf7SJacob Keller if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) 66080deb0bf7SJacob Keller goto out_put_vf; 66090deb0bf7SJacob Keller 66100deb0bf7SJacob Keller mbxdata.num_msg_proc = num_msg_proc; 66110deb0bf7SJacob Keller mbxdata.num_pending_arq = num_msg_pending; 66120deb0bf7SJacob Keller mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; 66130deb0bf7SJacob Keller #define ICE_MBX_OVERFLOW_WATERMARK 64 66140deb0bf7SJacob Keller mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; 66150deb0bf7SJacob Keller 66160deb0bf7SJacob Keller /* check to see if we have a malicious VF */ 66170deb0bf7SJacob Keller status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); 66180deb0bf7SJacob Keller if (status) 66190deb0bf7SJacob Keller goto out_put_vf; 66200deb0bf7SJacob Keller 66210deb0bf7SJacob Keller if (malvf) { 66220deb0bf7SJacob Keller bool report_vf = false; 66230deb0bf7SJacob Keller 66240deb0bf7SJacob Keller /* if the VF is malicious and we haven't let the user 66250deb0bf7SJacob Keller * know about it, then let them know now 66260deb0bf7SJacob Keller */ 66270deb0bf7SJacob Keller status = 
ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs, 66280deb0bf7SJacob Keller ICE_MAX_VF_COUNT, vf_id, 66290deb0bf7SJacob Keller &report_vf); 66300deb0bf7SJacob Keller if (status) 66310deb0bf7SJacob Keller dev_dbg(dev, "Error reporting malicious VF\n"); 66320deb0bf7SJacob Keller 66330deb0bf7SJacob Keller if (report_vf) { 66340deb0bf7SJacob Keller struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 66350deb0bf7SJacob Keller 66360deb0bf7SJacob Keller if (pf_vsi) 66370deb0bf7SJacob Keller dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n", 66380deb0bf7SJacob Keller &vf->dev_lan_addr.addr[0], 66390deb0bf7SJacob Keller pf_vsi->netdev->dev_addr); 66400deb0bf7SJacob Keller } 66410deb0bf7SJacob Keller } 66420deb0bf7SJacob Keller 66430deb0bf7SJacob Keller out_put_vf: 66440deb0bf7SJacob Keller ice_put_vf(vf); 66450deb0bf7SJacob Keller return malvf; 66460deb0bf7SJacob Keller } 6647