1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_virtchnl.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice.h"
7 #include "ice_base.h"
8 #include "ice_lib.h"
9 #include "ice_fltr.h"
10 #include "ice_virtchnl_allowlist.h"
11 #include "ice_vf_vsi_vlan_ops.h"
12 #include "ice_vlan.h"
13 #include "ice_flex_pipe.h"
14 #include "ice_dcb_lib.h"
15 
16 #define FIELD_SELECTOR(proto_hdr_field) \
17 		BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
18 
19 struct ice_vc_hdr_match_type {
20 	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
21 	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
22 };
23 
24 static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
25 	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
26 	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
27 	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
28 	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
29 	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
30 					ICE_FLOW_SEG_HDR_IPV_OTHER},
31 	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
32 					ICE_FLOW_SEG_HDR_IPV_OTHER},
33 	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
34 	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
35 	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
36 	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
37 	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
38 	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
39 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
40 					ICE_FLOW_SEG_HDR_GTPU_DWN},
41 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
42 					ICE_FLOW_SEG_HDR_GTPU_UP},
43 	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
44 	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
45 	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
46 	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
47 };
48 
49 struct ice_vc_hash_field_match_type {
50 	u32 vc_hdr;		/* virtchnl headers
51 				 * (VIRTCHNL_PROTO_HDR_XXX)
52 				 */
53 	u32 vc_hash_field;	/* virtchnl hash fields selector
54 				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
55 				 */
56 	u64 ice_hash_field;	/* ice hash fields
57 				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
58 				 */
59 };
60 
61 static const struct
62 ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
63 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
64 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
65 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
66 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
67 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
68 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
69 		ICE_FLOW_HASH_ETH},
70 	{VIRTCHNL_PROTO_HDR_ETH,
71 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
72 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
73 	{VIRTCHNL_PROTO_HDR_S_VLAN,
74 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
75 		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
76 	{VIRTCHNL_PROTO_HDR_C_VLAN,
77 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
78 		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
79 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
80 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
81 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
82 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
83 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
84 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
85 		ICE_FLOW_HASH_IPV4},
86 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
87 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
88 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
89 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
90 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
91 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
92 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
93 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
94 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
95 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
96 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
97 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
98 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
99 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
100 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
101 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
102 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
103 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
104 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
105 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
106 		ICE_FLOW_HASH_IPV6},
107 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
108 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
109 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
110 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
111 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
112 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
113 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
114 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
115 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
116 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
117 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
118 		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
119 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
120 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
121 	{VIRTCHNL_PROTO_HDR_TCP,
122 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
123 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
124 	{VIRTCHNL_PROTO_HDR_TCP,
125 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
126 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
127 	{VIRTCHNL_PROTO_HDR_TCP,
128 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
129 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
130 		ICE_FLOW_HASH_TCP_PORT},
131 	{VIRTCHNL_PROTO_HDR_UDP,
132 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
133 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
134 	{VIRTCHNL_PROTO_HDR_UDP,
135 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
136 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
137 	{VIRTCHNL_PROTO_HDR_UDP,
138 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
139 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
140 		ICE_FLOW_HASH_UDP_PORT},
141 	{VIRTCHNL_PROTO_HDR_SCTP,
142 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
143 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
144 	{VIRTCHNL_PROTO_HDR_SCTP,
145 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
146 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
147 	{VIRTCHNL_PROTO_HDR_SCTP,
148 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
149 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
150 		ICE_FLOW_HASH_SCTP_PORT},
151 	{VIRTCHNL_PROTO_HDR_PPPOE,
152 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
153 		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
154 	{VIRTCHNL_PROTO_HDR_GTPU_IP,
155 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
156 		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
157 	{VIRTCHNL_PROTO_HDR_L2TPV3,
158 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
159 		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
160 	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
161 		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
162 	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
163 		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
164 	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
165 		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
166 };
167 
168 /**
169  * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
170  * @pf: pointer to the PF structure
171  * @v_opcode: operation code
172  * @v_retval: return value
173  * @msg: pointer to the msg buffer
174  * @msglen: msg length
175  */
176 static void
177 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
178 		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
179 {
180 	struct ice_hw *hw = &pf->hw;
181 	struct ice_vf *vf;
182 	unsigned int bkt;
183 
184 	mutex_lock(&pf->vfs.table_lock);
185 	ice_for_each_vf(pf, bkt, vf) {
186 		/* Not all vfs are enabled so skip the ones that are not */
187 		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
188 		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
189 			continue;
190 
191 		/* Ignore return value on purpose - a given VF may fail, but
192 		 * we need to keep going and send to all of them
193 		 */
194 		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
195 				      msglen, NULL);
196 	}
197 	mutex_unlock(&pf->vfs.table_lock);
198 }
199 
200 /**
201  * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
202  * @vf: pointer to the VF structure
203  * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
204  * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
205  * @link_up: whether or not to set the link up/down
206  */
207 static void
208 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
209 		 int ice_link_speed, bool link_up)
210 {
211 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
212 		pfe->event_data.link_event_adv.link_status = link_up;
213 		/* Speed in Mbps */
214 		pfe->event_data.link_event_adv.link_speed =
215 			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
216 	} else {
217 		pfe->event_data.link_event.link_status = link_up;
218 		/* Legacy method for virtchnl link speeds */
219 		pfe->event_data.link_event.link_speed =
220 			(enum virtchnl_link_speed)
221 			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
222 	}
223 }
224 
225 /**
226  * ice_vc_notify_vf_link_state - Inform a VF of link status
227  * @vf: pointer to the VF structure
228  *
229  * send a link status message to a single VF
230  */
231 void ice_vc_notify_vf_link_state(struct ice_vf *vf)
232 {
233 	struct virtchnl_pf_event pfe = { 0 };
234 	struct ice_hw *hw = &vf->pf->hw;
235 
236 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
237 	pfe.severity = PF_EVENT_SEVERITY_INFO;
238 
239 	if (ice_is_vf_link_up(vf))
240 		ice_set_pfe_link(vf, &pfe,
241 				 hw->port_info->phy.link_info.link_speed, true);
242 	else
243 		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
244 
245 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
246 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
247 			      sizeof(pfe), NULL);
248 }
249 
250 /**
251  * ice_vc_notify_link_state - Inform all VFs on a PF of link status
252  * @pf: pointer to the PF structure
253  */
254 void ice_vc_notify_link_state(struct ice_pf *pf)
255 {
256 	struct ice_vf *vf;
257 	unsigned int bkt;
258 
259 	mutex_lock(&pf->vfs.table_lock);
260 	ice_for_each_vf(pf, bkt, vf)
261 		ice_vc_notify_vf_link_state(vf);
262 	mutex_unlock(&pf->vfs.table_lock);
263 }
264 
265 /**
266  * ice_vc_notify_reset - Send pending reset message to all VFs
267  * @pf: pointer to the PF structure
268  *
269  * indicate a pending reset to all VFs on a given PF
270  */
271 void ice_vc_notify_reset(struct ice_pf *pf)
272 {
273 	struct virtchnl_pf_event pfe;
274 
275 	if (!ice_has_vfs(pf))
276 		return;
277 
278 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
279 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
280 	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
281 			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
282 }
283 
284 /**
285  * ice_vc_send_msg_to_vf - Send message to VF
286  * @vf: pointer to the VF info
287  * @v_opcode: virtual channel opcode
288  * @v_retval: virtual channel return value
289  * @msg: pointer to the msg buffer
290  * @msglen: msg length
291  *
292  * send msg to VF
293  */
294 int
295 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
296 		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
297 {
298 	struct device *dev;
299 	struct ice_pf *pf;
300 	int aq_ret;
301 
302 	pf = vf->pf;
303 	dev = ice_pf_to_dev(pf);
304 
305 	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
306 				       msg, msglen, NULL);
307 	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
308 		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
309 			 vf->vf_id, aq_ret,
310 			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
311 		return -EIO;
312 	}
313 
314 	return 0;
315 }
316 
317 /**
318  * ice_vc_get_ver_msg
319  * @vf: pointer to the VF info
320  * @msg: pointer to the msg buffer
321  *
322  * called from the VF to request the API version used by the PF
323  */
324 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
325 {
326 	struct virtchnl_version_info info = {
327 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
328 	};
329 
330 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
331 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
332 	if (VF_IS_V10(&vf->vf_ver))
333 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
334 
335 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
336 				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
337 				     sizeof(struct virtchnl_version_info));
338 }
339 
340 /**
341  * ice_vc_get_max_frame_size - get max frame size allowed for VF
342  * @vf: VF used to determine max frame size
343  *
344  * Max frame size is determined based on the current port's max frame size and
345  * whether a port VLAN is configured on this VF. The VF is not aware whether
346  * it's in a port VLAN so the PF needs to account for this in max frame size
347  * checks and sending the max frame size to the VF.
348  */
349 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
350 {
351 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
352 	u16 max_frame_size;
353 
354 	max_frame_size = pi->phy.link_info.max_frame_size;
355 
356 	if (ice_vf_is_port_vlan_ena(vf))
357 		max_frame_size -= VLAN_HLEN;
358 
359 	return max_frame_size;
360 }
361 
362 /**
363  * ice_vc_get_vlan_caps
364  * @hw: pointer to the hw
365  * @vf: pointer to the VF info
366  * @vsi: pointer to the VSI
367  * @driver_caps: current driver caps
368  *
369  * Return 0 if there is no VLAN caps supported, or VLAN caps value
370  */
371 static u32
372 ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
373 		     u32 driver_caps)
374 {
375 	if (ice_is_eswitch_mode_switchdev(vf->pf))
376 		/* In switchdev setting VLAN from VF isn't supported */
377 		return 0;
378 
379 	if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
380 		/* VLAN offloads based on current device configuration */
381 		return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
382 	} else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
383 		/* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
384 		 * these two conditions, which amounts to guest VLAN filtering
385 		 * and offloads being based on the inner VLAN or the
386 		 * inner/single VLAN respectively and don't allow VF to
387 		 * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
388 		 */
389 		if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
390 			return VIRTCHNL_VF_OFFLOAD_VLAN;
391 		} else if (!ice_is_dvm_ena(hw) &&
392 			   !ice_vf_is_port_vlan_ena(vf)) {
393 			/* configure backward compatible support for VFs that
394 			 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
395 			 * configured in SVM, and no port VLAN is configured
396 			 */
397 			ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
398 			return VIRTCHNL_VF_OFFLOAD_VLAN;
399 		} else if (ice_is_dvm_ena(hw)) {
400 			/* configure software offloaded VLAN support when DVM
401 			 * is enabled, but no port VLAN is enabled
402 			 */
403 			ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
404 		}
405 	}
406 
407 	return 0;
408 }
409 
410 /**
411  * ice_vc_get_vf_res_msg
412  * @vf: pointer to the VF info
413  * @msg: pointer to the msg buffer
414  *
415  * called from the VF to request its resources
416  */
417 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
418 {
419 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
420 	struct virtchnl_vf_resource *vfres = NULL;
421 	struct ice_hw *hw = &vf->pf->hw;
422 	struct ice_vsi *vsi;
423 	int len = 0;
424 	int ret;
425 
426 	if (ice_check_vf_init(vf)) {
427 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
428 		goto err;
429 	}
430 
431 	len = sizeof(struct virtchnl_vf_resource);
432 
433 	vfres = kzalloc(len, GFP_KERNEL);
434 	if (!vfres) {
435 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
436 		len = 0;
437 		goto err;
438 	}
439 	if (VF_IS_V11(&vf->vf_ver))
440 		vf->driver_caps = *(u32 *)msg;
441 	else
442 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
443 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
444 				  VIRTCHNL_VF_OFFLOAD_VLAN;
445 
446 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
447 	vsi = ice_get_vf_vsi(vf);
448 	if (!vsi) {
449 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
450 		goto err;
451 	}
452 
453 	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
454 						    vf->driver_caps);
455 
456 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
457 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
458 	} else {
459 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
460 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
461 		else
462 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
463 	}
464 
465 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
466 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
467 
468 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
469 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
470 
471 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
472 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
473 
474 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
475 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
476 
477 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
478 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
479 
480 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
481 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
482 
483 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
484 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
485 
486 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
487 		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
488 
489 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
490 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
491 
492 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
493 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
494 
495 	vfres->num_vsis = 1;
496 	/* Tx and Rx queue are equal for VF */
497 	vfres->num_queue_pairs = vsi->num_txq;
498 	vfres->max_vectors = vf->pf->vfs.num_msix_per;
499 	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
500 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
501 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
502 
503 	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
504 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
505 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
506 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
507 			vf->hw_lan_addr.addr);
508 
509 	/* match guest capabilities */
510 	vf->driver_caps = vfres->vf_cap_flags;
511 
512 	ice_vc_set_caps_allowlist(vf);
513 	ice_vc_set_working_allowlist(vf);
514 
515 	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
516 
517 err:
518 	/* send the response back to the VF */
519 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
520 				    (u8 *)vfres, len);
521 
522 	kfree(vfres);
523 	return ret;
524 }
525 
526 /**
527  * ice_vc_reset_vf_msg
528  * @vf: pointer to the VF info
529  *
530  * called from the VF to reset itself,
531  * unlike other virtchnl messages, PF driver
532  * doesn't send the response back to the VF
533  */
534 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
535 {
536 	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
537 		ice_reset_vf(vf, 0);
538 }
539 
540 /**
541  * ice_vc_isvalid_vsi_id
542  * @vf: pointer to the VF info
543  * @vsi_id: VF relative VSI ID
544  *
545  * check for the valid VSI ID
546  */
547 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
548 {
549 	struct ice_pf *pf = vf->pf;
550 	struct ice_vsi *vsi;
551 
552 	vsi = ice_find_vsi(pf, vsi_id);
553 
554 	return (vsi && (vsi->vf == vf));
555 }
556 
557 /**
558  * ice_vc_isvalid_q_id
559  * @vf: pointer to the VF info
560  * @vsi_id: VSI ID
561  * @qid: VSI relative queue ID
562  *
563  * check for the valid queue ID
564  */
565 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
566 {
567 	struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
568 	/* allocated Tx and Rx queues should be always equal for VF VSI */
569 	return (vsi && (qid < vsi->alloc_txq));
570 }
571 
572 /**
573  * ice_vc_isvalid_ring_len
574  * @ring_len: length of ring
575  *
576  * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
577  * or zero
578  */
579 static bool ice_vc_isvalid_ring_len(u16 ring_len)
580 {
581 	return ring_len == 0 ||
582 	       (ring_len >= ICE_MIN_NUM_DESC &&
583 		ring_len <= ICE_MAX_NUM_DESC &&
584 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
585 }
586 
587 /**
588  * ice_vc_validate_pattern
589  * @vf: pointer to the VF info
590  * @proto: virtchnl protocol headers
591  *
592  * validate the pattern is supported or not.
593  *
594  * Return: true on success, false on error.
595  */
596 bool
597 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
598 {
599 	bool is_ipv4 = false;
600 	bool is_ipv6 = false;
601 	bool is_udp = false;
602 	u16 ptype = -1;
603 	int i = 0;
604 
605 	while (i < proto->count &&
606 	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
607 		switch (proto->proto_hdr[i].type) {
608 		case VIRTCHNL_PROTO_HDR_ETH:
609 			ptype = ICE_PTYPE_MAC_PAY;
610 			break;
611 		case VIRTCHNL_PROTO_HDR_IPV4:
612 			ptype = ICE_PTYPE_IPV4_PAY;
613 			is_ipv4 = true;
614 			break;
615 		case VIRTCHNL_PROTO_HDR_IPV6:
616 			ptype = ICE_PTYPE_IPV6_PAY;
617 			is_ipv6 = true;
618 			break;
619 		case VIRTCHNL_PROTO_HDR_UDP:
620 			if (is_ipv4)
621 				ptype = ICE_PTYPE_IPV4_UDP_PAY;
622 			else if (is_ipv6)
623 				ptype = ICE_PTYPE_IPV6_UDP_PAY;
624 			is_udp = true;
625 			break;
626 		case VIRTCHNL_PROTO_HDR_TCP:
627 			if (is_ipv4)
628 				ptype = ICE_PTYPE_IPV4_TCP_PAY;
629 			else if (is_ipv6)
630 				ptype = ICE_PTYPE_IPV6_TCP_PAY;
631 			break;
632 		case VIRTCHNL_PROTO_HDR_SCTP:
633 			if (is_ipv4)
634 				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
635 			else if (is_ipv6)
636 				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
637 			break;
638 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
639 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
640 			if (is_ipv4)
641 				ptype = ICE_MAC_IPV4_GTPU;
642 			else if (is_ipv6)
643 				ptype = ICE_MAC_IPV6_GTPU;
644 			goto out;
645 		case VIRTCHNL_PROTO_HDR_L2TPV3:
646 			if (is_ipv4)
647 				ptype = ICE_MAC_IPV4_L2TPV3;
648 			else if (is_ipv6)
649 				ptype = ICE_MAC_IPV6_L2TPV3;
650 			goto out;
651 		case VIRTCHNL_PROTO_HDR_ESP:
652 			if (is_ipv4)
653 				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
654 						ICE_MAC_IPV4_ESP;
655 			else if (is_ipv6)
656 				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
657 						ICE_MAC_IPV6_ESP;
658 			goto out;
659 		case VIRTCHNL_PROTO_HDR_AH:
660 			if (is_ipv4)
661 				ptype = ICE_MAC_IPV4_AH;
662 			else if (is_ipv6)
663 				ptype = ICE_MAC_IPV6_AH;
664 			goto out;
665 		case VIRTCHNL_PROTO_HDR_PFCP:
666 			if (is_ipv4)
667 				ptype = ICE_MAC_IPV4_PFCP_SESSION;
668 			else if (is_ipv6)
669 				ptype = ICE_MAC_IPV6_PFCP_SESSION;
670 			goto out;
671 		default:
672 			break;
673 		}
674 		i++;
675 	}
676 
677 out:
678 	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
679 }
680 
681 /**
682  * ice_vc_parse_rss_cfg - parses hash fields and headers from
683  * a specific virtchnl RSS cfg
684  * @hw: pointer to the hardware
685  * @rss_cfg: pointer to the virtchnl RSS cfg
686  * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
687  * to configure
688  * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
689  *
690  * Return true if all the protocol header and hash fields in the RSS cfg could
691  * be parsed, else return false
692  *
693  * This function parses the virtchnl RSS cfg to be the intended
694  * hash fields and the intended header for RSS configuration
695  */
696 static bool
697 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
698 		     u32 *addl_hdrs, u64 *hash_flds)
699 {
700 	const struct ice_vc_hash_field_match_type *hf_list;
701 	const struct ice_vc_hdr_match_type *hdr_list;
702 	int i, hf_list_len, hdr_list_len;
703 
704 	hf_list = ice_vc_hash_field_list;
705 	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
706 	hdr_list = ice_vc_hdr_list;
707 	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
708 
709 	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
710 		struct virtchnl_proto_hdr *proto_hdr =
711 					&rss_cfg->proto_hdrs.proto_hdr[i];
712 		bool hdr_found = false;
713 		int j;
714 
715 		/* Find matched ice headers according to virtchnl headers. */
716 		for (j = 0; j < hdr_list_len; j++) {
717 			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
718 
719 			if (proto_hdr->type == hdr_map.vc_hdr) {
720 				*addl_hdrs |= hdr_map.ice_hdr;
721 				hdr_found = true;
722 			}
723 		}
724 
725 		if (!hdr_found)
726 			return false;
727 
728 		/* Find matched ice hash fields according to
729 		 * virtchnl hash fields.
730 		 */
731 		for (j = 0; j < hf_list_len; j++) {
732 			struct ice_vc_hash_field_match_type hf_map = hf_list[j];
733 
734 			if (proto_hdr->type == hf_map.vc_hdr &&
735 			    proto_hdr->field_selector == hf_map.vc_hash_field) {
736 				*hash_flds |= hf_map.ice_hash_field;
737 				break;
738 			}
739 		}
740 	}
741 
742 	return true;
743 }
744 
745 /**
746  * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
747  * RSS offloads
748  * @caps: VF driver negotiated capabilities
749  *
750  * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
751  * else return false
752  */
753 static bool ice_vf_adv_rss_offload_ena(u32 caps)
754 {
755 	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
756 }
757 
758 /**
759  * ice_vc_handle_rss_cfg
760  * @vf: pointer to the VF info
761  * @msg: pointer to the message buffer
762  * @add: add a RSS config if true, otherwise delete a RSS config
763  *
764  * This function adds/deletes a RSS config
765  */
766 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
767 {
768 	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
769 	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
770 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
771 	struct device *dev = ice_pf_to_dev(vf->pf);
772 	struct ice_hw *hw = &vf->pf->hw;
773 	struct ice_vsi *vsi;
774 
775 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
776 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
777 			vf->vf_id);
778 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
779 		goto error_param;
780 	}
781 
782 	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
783 		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
784 			vf->vf_id);
785 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
786 		goto error_param;
787 	}
788 
789 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
790 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
791 		goto error_param;
792 	}
793 
794 	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
795 	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
796 	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
797 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
798 			vf->vf_id);
799 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
800 		goto error_param;
801 	}
802 
803 	vsi = ice_get_vf_vsi(vf);
804 	if (!vsi) {
805 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
806 		goto error_param;
807 	}
808 
809 	if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
810 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
811 		goto error_param;
812 	}
813 
814 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
815 		struct ice_vsi_ctx *ctx;
816 		u8 lut_type, hash_type;
817 		int status;
818 
819 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
820 		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
821 				ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
822 
823 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
824 		if (!ctx) {
825 			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
826 			goto error_param;
827 		}
828 
829 		ctx->info.q_opt_rss = ((lut_type <<
830 					ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
831 				       ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
832 				       (hash_type &
833 					ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
834 
835 		/* Preserve existing queueing option setting */
836 		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
837 					  ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
838 		ctx->info.q_opt_tc = vsi->info.q_opt_tc;
839 		ctx->info.q_opt_flags = vsi->info.q_opt_rss;
840 
841 		ctx->info.valid_sections =
842 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
843 
844 		status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
845 		if (status) {
846 			dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
847 				status, ice_aq_str(hw->adminq.sq_last_status));
848 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
849 		} else {
850 			vsi->info.q_opt_rss = ctx->info.q_opt_rss;
851 		}
852 
853 		kfree(ctx);
854 	} else {
855 		u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
856 		u64 hash_flds = ICE_HASH_INVALID;
857 
858 		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
859 					  &hash_flds)) {
860 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
861 			goto error_param;
862 		}
863 
864 		if (add) {
865 			if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
866 					    addl_hdrs)) {
867 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
868 				dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
869 					vsi->vsi_num, v_ret);
870 			}
871 		} else {
872 			int status;
873 
874 			status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
875 						 addl_hdrs);
876 			/* We just ignore -ENOENT, because if two configurations
877 			 * share the same profile remove one of them actually
878 			 * removes both, since the profile is deleted.
879 			 */
880 			if (status && status != -ENOENT) {
881 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
882 				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
883 					vf->vf_id, status);
884 			}
885 		}
886 	}
887 
888 error_param:
889 	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
890 }
891 
892 /**
893  * ice_vc_config_rss_key
894  * @vf: pointer to the VF info
895  * @msg: pointer to the msg buffer
896  *
897  * Configure the VF's RSS key
898  */
899 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
900 {
901 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
902 	struct virtchnl_rss_key *vrk =
903 		(struct virtchnl_rss_key *)msg;
904 	struct ice_vsi *vsi;
905 
906 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
907 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
908 		goto error_param;
909 	}
910 
911 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
912 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
913 		goto error_param;
914 	}
915 
916 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
917 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
918 		goto error_param;
919 	}
920 
921 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
922 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
923 		goto error_param;
924 	}
925 
926 	vsi = ice_get_vf_vsi(vf);
927 	if (!vsi) {
928 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
929 		goto error_param;
930 	}
931 
932 	if (ice_set_rss_key(vsi, vrk->key))
933 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
934 error_param:
935 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
936 				     NULL, 0);
937 }
938 
939 /**
940  * ice_vc_config_rss_lut
941  * @vf: pointer to the VF info
942  * @msg: pointer to the msg buffer
943  *
944  * Configure the VF's RSS LUT
945  */
946 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
947 {
948 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
949 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
950 	struct ice_vsi *vsi;
951 
952 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
953 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
954 		goto error_param;
955 	}
956 
957 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
958 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
959 		goto error_param;
960 	}
961 
962 	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
963 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
964 		goto error_param;
965 	}
966 
967 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
968 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
969 		goto error_param;
970 	}
971 
972 	vsi = ice_get_vf_vsi(vf);
973 	if (!vsi) {
974 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
975 		goto error_param;
976 	}
977 
978 	if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
979 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
980 error_param:
981 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
982 				     NULL, 0);
983 }
984 
985 /**
986  * ice_vc_cfg_promiscuous_mode_msg
987  * @vf: pointer to the VF info
988  * @msg: pointer to the msg buffer
989  *
990  * called from the VF to configure VF VSIs promiscuous mode
991  */
992 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
993 {
994 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
995 	bool rm_promisc, alluni = false, allmulti = false;
996 	struct virtchnl_promisc_info *info =
997 	    (struct virtchnl_promisc_info *)msg;
998 	struct ice_vsi_vlan_ops *vlan_ops;
999 	int mcast_err = 0, ucast_err = 0;
1000 	struct ice_pf *pf = vf->pf;
1001 	struct ice_vsi *vsi;
1002 	struct device *dev;
1003 	int ret = 0;
1004 
1005 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1006 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1007 		goto error_param;
1008 	}
1009 
1010 	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
1011 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1012 		goto error_param;
1013 	}
1014 
1015 	vsi = ice_get_vf_vsi(vf);
1016 	if (!vsi) {
1017 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1018 		goto error_param;
1019 	}
1020 
1021 	dev = ice_pf_to_dev(pf);
1022 	if (!ice_is_vf_trusted(vf)) {
1023 		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
1024 			vf->vf_id);
1025 		/* Leave v_ret alone, lie to the VF on purpose. */
1026 		goto error_param;
1027 	}
1028 
1029 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
1030 		alluni = true;
1031 
1032 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1033 		allmulti = true;
1034 
1035 	rm_promisc = !allmulti && !alluni;
1036 
1037 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1038 	if (rm_promisc)
1039 		ret = vlan_ops->ena_rx_filtering(vsi);
1040 	else
1041 		ret = vlan_ops->dis_rx_filtering(vsi);
1042 	if (ret) {
1043 		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
1044 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1045 		goto error_param;
1046 	}
1047 
1048 	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
1049 		bool set_dflt_vsi = alluni || allmulti;
1050 
1051 		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
1052 			/* only attempt to set the default forwarding VSI if
1053 			 * it's not currently set
1054 			 */
1055 			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
1056 		else if (!set_dflt_vsi &&
1057 			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
1058 			/* only attempt to free the default forwarding VSI if we
1059 			 * are the owner
1060 			 */
1061 			ret = ice_clear_dflt_vsi(pf->first_sw);
1062 
1063 		if (ret) {
1064 			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
1065 				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
1066 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1067 			goto error_param;
1068 		}
1069 	} else {
1070 		u8 mcast_m, ucast_m;
1071 
1072 		if (ice_vf_is_port_vlan_ena(vf) ||
1073 		    ice_vsi_has_non_zero_vlans(vsi)) {
1074 			mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
1075 			ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
1076 		} else {
1077 			mcast_m = ICE_MCAST_PROMISC_BITS;
1078 			ucast_m = ICE_UCAST_PROMISC_BITS;
1079 		}
1080 
1081 		if (alluni)
1082 			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
1083 		else
1084 			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
1085 
1086 		if (allmulti)
1087 			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
1088 		else
1089 			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
1090 
1091 		if (ucast_err || mcast_err)
1092 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1093 	}
1094 
1095 	if (!mcast_err) {
1096 		if (allmulti &&
1097 		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
1098 			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
1099 				 vf->vf_id);
1100 		else if (!allmulti &&
1101 			 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
1102 					    vf->vf_states))
1103 			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
1104 				 vf->vf_id);
1105 	}
1106 
1107 	if (!ucast_err) {
1108 		if (alluni &&
1109 		    !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
1110 			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
1111 				 vf->vf_id);
1112 		else if (!alluni &&
1113 			 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
1114 					    vf->vf_states))
1115 			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
1116 				 vf->vf_id);
1117 	}
1118 
1119 error_param:
1120 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1121 				     v_ret, NULL, 0);
1122 }
1123 
1124 /**
1125  * ice_vc_get_stats_msg
1126  * @vf: pointer to the VF info
1127  * @msg: pointer to the msg buffer
1128  *
1129  * called from the VF to get VSI stats
1130  */
1131 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1132 {
1133 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1134 	struct virtchnl_queue_select *vqs =
1135 		(struct virtchnl_queue_select *)msg;
1136 	struct ice_eth_stats stats = { 0 };
1137 	struct ice_vsi *vsi;
1138 
1139 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1140 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1141 		goto error_param;
1142 	}
1143 
1144 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1145 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1146 		goto error_param;
1147 	}
1148 
1149 	vsi = ice_get_vf_vsi(vf);
1150 	if (!vsi) {
1151 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1152 		goto error_param;
1153 	}
1154 
1155 	ice_update_eth_stats(vsi);
1156 
1157 	stats = vsi->eth_stats;
1158 
1159 error_param:
1160 	/* send the response to the VF */
1161 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1162 				     (u8 *)&stats, sizeof(stats));
1163 }
1164 
1165 /**
1166  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
1167  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
1168  *
1169  * Return true on successful validation, else false
1170  */
1171 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
1172 {
1173 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
1174 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
1175 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
1176 		return false;
1177 
1178 	return true;
1179 }
1180 
1181 /**
1182  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
1183  * @vsi: VSI of the VF to configure
1184  * @q_idx: VF queue index used to determine the queue in the PF's space
1185  */
1186 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
1187 {
1188 	struct ice_hw *hw = &vsi->back->hw;
1189 	u32 pfq = vsi->txq_map[q_idx];
1190 	u32 reg;
1191 
1192 	reg = rd32(hw, QINT_TQCTL(pfq));
1193 
1194 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
1195 	 * this is most likely a poll mode VF driver, so don't enable an
1196 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
1197 	 */
1198 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
1199 		return;
1200 
1201 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
1202 }
1203 
1204 /**
1205  * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL
1206  * @vsi: VSI of the VF to configure
1207  * @q_idx: VF queue index used to determine the queue in the PF's space
1208  */
1209 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
1210 {
1211 	struct ice_hw *hw = &vsi->back->hw;
1212 	u32 pfq = vsi->rxq_map[q_idx];
1213 	u32 reg;
1214 
1215 	reg = rd32(hw, QINT_RQCTL(pfq));
1216 
1217 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
1218 	 * this is most likely a poll mode VF driver, so don't enable an
1219 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
1220 	 */
1221 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
1222 		return;
1223 
1224 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
1225 }
1226 
1227 /**
1228  * ice_vc_ena_qs_msg
1229  * @vf: pointer to the VF info
1230  * @msg: pointer to the msg buffer
1231  *
1232  * called from the VF to enable all or specific queue(s)
1233  */
1234 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1235 {
1236 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1237 	struct virtchnl_queue_select *vqs =
1238 	    (struct virtchnl_queue_select *)msg;
1239 	struct ice_vsi *vsi;
1240 	unsigned long q_map;
1241 	u16 vf_q_id;
1242 
1243 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1244 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1245 		goto error_param;
1246 	}
1247 
1248 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1249 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1250 		goto error_param;
1251 	}
1252 
1253 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
1254 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1255 		goto error_param;
1256 	}
1257 
1258 	vsi = ice_get_vf_vsi(vf);
1259 	if (!vsi) {
1260 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1261 		goto error_param;
1262 	}
1263 
1264 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
1265 	 * Tx queue group list was configured and the context bits were
1266 	 * programmed using ice_vsi_cfg_txqs
1267 	 */
1268 	q_map = vqs->rx_queues;
1269 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1270 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1271 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1272 			goto error_param;
1273 		}
1274 
1275 		/* Skip queue if enabled */
1276 		if (test_bit(vf_q_id, vf->rxq_ena))
1277 			continue;
1278 
1279 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
1280 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
1281 				vf_q_id, vsi->vsi_num);
1282 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1283 			goto error_param;
1284 		}
1285 
1286 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
1287 		set_bit(vf_q_id, vf->rxq_ena);
1288 	}
1289 
1290 	q_map = vqs->tx_queues;
1291 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1292 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1293 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1294 			goto error_param;
1295 		}
1296 
1297 		/* Skip queue if enabled */
1298 		if (test_bit(vf_q_id, vf->txq_ena))
1299 			continue;
1300 
1301 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
1302 		set_bit(vf_q_id, vf->txq_ena);
1303 	}
1304 
1305 	/* Set flag to indicate that queues are enabled */
1306 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1307 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1308 
1309 error_param:
1310 	/* send the response to the VF */
1311 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1312 				     NULL, 0);
1313 }
1314 
1315 /**
1316  * ice_vf_vsi_dis_single_txq - disable a single Tx queue
1317  * @vf: VF to disable queue for
1318  * @vsi: VSI for the VF
1319  * @q_id: VF relative (0-based) queue ID
1320  *
1321  * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
1322  * disabled then clear q_id bit in the enabled queues bitmap and return
1323  * success. Otherwise return error.
1324  */
1325 static int
1326 ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
1327 {
1328 	struct ice_txq_meta txq_meta = { 0 };
1329 	struct ice_tx_ring *ring;
1330 	int err;
1331 
1332 	if (!test_bit(q_id, vf->txq_ena))
1333 		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
1334 			q_id, vsi->vsi_num);
1335 
1336 	ring = vsi->tx_rings[q_id];
1337 	if (!ring)
1338 		return -EINVAL;
1339 
1340 	ice_fill_txq_meta(vsi, ring, &txq_meta);
1341 
1342 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
1343 	if (err) {
1344 		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
1345 			q_id, vsi->vsi_num);
1346 		return err;
1347 	}
1348 
1349 	/* Clear enabled queues flag */
1350 	clear_bit(q_id, vf->txq_ena);
1351 
1352 	return 0;
1353 }
1354 
1355 /**
1356  * ice_vc_dis_qs_msg
1357  * @vf: pointer to the VF info
1358  * @msg: pointer to the msg buffer
1359  *
1360  * called from the VF to disable all or specific queue(s)
1361  */
1362 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
1363 {
1364 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1365 	struct virtchnl_queue_select *vqs =
1366 	    (struct virtchnl_queue_select *)msg;
1367 	struct ice_vsi *vsi;
1368 	unsigned long q_map;
1369 	u16 vf_q_id;
1370 
1371 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
1372 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
1373 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1374 		goto error_param;
1375 	}
1376 
1377 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1378 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1379 		goto error_param;
1380 	}
1381 
1382 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
1383 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1384 		goto error_param;
1385 	}
1386 
1387 	vsi = ice_get_vf_vsi(vf);
1388 	if (!vsi) {
1389 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1390 		goto error_param;
1391 	}
1392 
1393 	if (vqs->tx_queues) {
1394 		q_map = vqs->tx_queues;
1395 
1396 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1397 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1398 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1399 				goto error_param;
1400 			}
1401 
1402 			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
1403 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1404 				goto error_param;
1405 			}
1406 		}
1407 	}
1408 
1409 	q_map = vqs->rx_queues;
1410 	/* speed up Rx queue disable by batching them if possible */
1411 	if (q_map &&
1412 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
1413 		if (ice_vsi_stop_all_rx_rings(vsi)) {
1414 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
1415 				vsi->vsi_num);
1416 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1417 			goto error_param;
1418 		}
1419 
1420 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
1421 	} else if (q_map) {
1422 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1423 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1424 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1425 				goto error_param;
1426 			}
1427 
1428 			/* Skip queue if not enabled */
1429 			if (!test_bit(vf_q_id, vf->rxq_ena))
1430 				continue;
1431 
1432 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
1433 						     true)) {
1434 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
1435 					vf_q_id, vsi->vsi_num);
1436 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1437 				goto error_param;
1438 			}
1439 
1440 			/* Clear enabled queues flag */
1441 			clear_bit(vf_q_id, vf->rxq_ena);
1442 		}
1443 	}
1444 
1445 	/* Clear enabled queues flag */
1446 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
1447 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1448 
1449 error_param:
1450 	/* send the response to the VF */
1451 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1452 				     NULL, 0);
1453 }
1454 
1455 /**
1456  * ice_cfg_interrupt
1457  * @vf: pointer to the VF info
1458  * @vsi: the VSI being configured
1459  * @vector_id: vector ID
1460  * @map: vector map for mapping vectors to queues
1461  * @q_vector: structure for interrupt vector
1462  * configure the IRQ to queue map
1463  */
1464 static int
1465 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
1466 		  struct virtchnl_vector_map *map,
1467 		  struct ice_q_vector *q_vector)
1468 {
1469 	u16 vsi_q_id, vsi_q_id_idx;
1470 	unsigned long qmap;
1471 
1472 	q_vector->num_ring_rx = 0;
1473 	q_vector->num_ring_tx = 0;
1474 
1475 	qmap = map->rxq_map;
1476 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
1477 		vsi_q_id = vsi_q_id_idx;
1478 
1479 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
1480 			return VIRTCHNL_STATUS_ERR_PARAM;
1481 
1482 		q_vector->num_ring_rx++;
1483 		q_vector->rx.itr_idx = map->rxitr_idx;
1484 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
1485 		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
1486 				      q_vector->rx.itr_idx);
1487 	}
1488 
1489 	qmap = map->txq_map;
1490 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
1491 		vsi_q_id = vsi_q_id_idx;
1492 
1493 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
1494 			return VIRTCHNL_STATUS_ERR_PARAM;
1495 
1496 		q_vector->num_ring_tx++;
1497 		q_vector->tx.itr_idx = map->txitr_idx;
1498 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
1499 		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
1500 				      q_vector->tx.itr_idx);
1501 	}
1502 
1503 	return VIRTCHNL_STATUS_SUCCESS;
1504 }
1505 
1506 /**
1507  * ice_vc_cfg_irq_map_msg
1508  * @vf: pointer to the VF info
1509  * @msg: pointer to the msg buffer
1510  *
1511  * called from the VF to configure the IRQ to queue map
1512  */
1513 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
1514 {
1515 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1516 	u16 num_q_vectors_mapped, vsi_id, vector_id;
1517 	struct virtchnl_irq_map_info *irqmap_info;
1518 	struct virtchnl_vector_map *map;
1519 	struct ice_pf *pf = vf->pf;
1520 	struct ice_vsi *vsi;
1521 	int i;
1522 
1523 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
1524 	num_q_vectors_mapped = irqmap_info->num_vectors;
1525 
1526 	/* Check to make sure number of VF vectors mapped is not greater than
1527 	 * number of VF vectors originally allocated, and check that
1528 	 * there is actually at least a single VF queue vector mapped
1529 	 */
1530 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1531 	    pf->vfs.num_msix_per < num_q_vectors_mapped ||
1532 	    !num_q_vectors_mapped) {
1533 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1534 		goto error_param;
1535 	}
1536 
1537 	vsi = ice_get_vf_vsi(vf);
1538 	if (!vsi) {
1539 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1540 		goto error_param;
1541 	}
1542 
1543 	for (i = 0; i < num_q_vectors_mapped; i++) {
1544 		struct ice_q_vector *q_vector;
1545 
1546 		map = &irqmap_info->vecmap[i];
1547 
1548 		vector_id = map->vector_id;
1549 		vsi_id = map->vsi_id;
1550 		/* vector_id is always 0-based for each VF, and can never be
1551 		 * larger than or equal to the max allowed interrupts per VF
1552 		 */
1553 		if (!(vector_id < pf->vfs.num_msix_per) ||
1554 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
1555 		    (!vector_id && (map->rxq_map || map->txq_map))) {
1556 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1557 			goto error_param;
1558 		}
1559 
1560 		/* No need to map VF miscellaneous or rogue vector */
1561 		if (!vector_id)
1562 			continue;
1563 
1564 		/* Subtract non queue vector from vector_id passed by VF
1565 		 * to get actual number of VSI queue vector array index
1566 		 */
1567 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
1568 		if (!q_vector) {
1569 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1570 			goto error_param;
1571 		}
1572 
1573 		/* lookout for the invalid queue index */
1574 		v_ret = (enum virtchnl_status_code)
1575 			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
1576 		if (v_ret)
1577 			goto error_param;
1578 	}
1579 
1580 error_param:
1581 	/* send the response to the VF */
1582 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1583 				     NULL, 0);
1584 }
1585 
1586 /**
1587  * ice_vc_cfg_qs_msg
1588  * @vf: pointer to the VF info
1589  * @msg: pointer to the msg buffer
1590  *
1591  * called from the VF to configure the Rx/Tx queues
1592  */
1593 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
1594 {
1595 	struct virtchnl_vsi_queue_config_info *qci =
1596 	    (struct virtchnl_vsi_queue_config_info *)msg;
1597 	struct virtchnl_queue_pair_info *qpi;
1598 	struct ice_pf *pf = vf->pf;
1599 	struct ice_vsi *vsi;
1600 	int i = -1, q_idx;
1601 
1602 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1603 		goto error_param;
1604 
1605 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
1606 		goto error_param;
1607 
1608 	vsi = ice_get_vf_vsi(vf);
1609 	if (!vsi)
1610 		goto error_param;
1611 
1612 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
1613 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
1614 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
1615 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
1616 		goto error_param;
1617 	}
1618 
1619 	for (i = 0; i < qci->num_queue_pairs; i++) {
1620 		qpi = &qci->qpair[i];
1621 		if (qpi->txq.vsi_id != qci->vsi_id ||
1622 		    qpi->rxq.vsi_id != qci->vsi_id ||
1623 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
1624 		    qpi->txq.headwb_enabled ||
1625 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
1626 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1627 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
1628 			goto error_param;
1629 		}
1630 
1631 		q_idx = qpi->rxq.queue_id;
1632 
1633 		/* make sure selected "q_idx" is in valid range of queues
1634 		 * for selected "vsi"
1635 		 */
1636 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
1637 			goto error_param;
1638 		}
1639 
1640 		/* copy Tx queue info from VF into VSI */
1641 		if (qpi->txq.ring_len > 0) {
1642 			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
1643 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
1644 
1645 			/* Disable any existing queue first */
1646 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
1647 				goto error_param;
1648 
1649 			/* Configure a queue with the requested settings */
1650 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
1651 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
1652 					 vf->vf_id, i);
1653 				goto error_param;
1654 			}
1655 		}
1656 
1657 		/* copy Rx queue info from VF into VSI */
1658 		if (qpi->rxq.ring_len > 0) {
1659 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
1660 
1661 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
1662 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
1663 
1664 			if (qpi->rxq.databuffer_size != 0 &&
1665 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
1666 			     qpi->rxq.databuffer_size < 1024))
1667 				goto error_param;
1668 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
1669 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
1670 			if (qpi->rxq.max_pkt_size > max_frame_size ||
1671 			    qpi->rxq.max_pkt_size < 64)
1672 				goto error_param;
1673 
1674 			vsi->max_frame = qpi->rxq.max_pkt_size;
1675 			/* add space for the port VLAN since the VF driver is
1676 			 * not expected to account for it in the MTU
1677 			 * calculation
1678 			 */
1679 			if (ice_vf_is_port_vlan_ena(vf))
1680 				vsi->max_frame += VLAN_HLEN;
1681 
1682 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
1683 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
1684 					 vf->vf_id, i);
1685 				goto error_param;
1686 			}
1687 		}
1688 	}
1689 
1690 	/* send the response to the VF */
1691 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1692 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
1693 error_param:
1694 	/* disable whatever we can */
1695 	for (; i >= 0; i--) {
1696 		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
1697 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
1698 				vf->vf_id, i);
1699 		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
1700 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
1701 				vf->vf_id, i);
1702 	}
1703 
1704 	/* send the response to the VF */
1705 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1706 				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
1707 }
1708 
1709 /**
1710  * ice_can_vf_change_mac
1711  * @vf: pointer to the VF info
1712  *
1713  * Return true if the VF is allowed to change its MAC filters, false otherwise
1714  */
1715 static bool ice_can_vf_change_mac(struct ice_vf *vf)
1716 {
1717 	/* If the VF MAC address has been set administratively (via the
1718 	 * ndo_set_vf_mac command), then deny permission to the VF to
1719 	 * add/delete unicast MAC addresses, unless the VF is trusted
1720 	 */
1721 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
1722 		return false;
1723 
1724 	return true;
1725 }
1726 
1727 /**
1728  * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
1729  * @vc_ether_addr: used to extract the type
1730  */
1731 static u8
1732 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
1733 {
1734 	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
1735 }
1736 
1737 /**
1738  * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
1739  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1740  */
1741 static bool
1742 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
1743 {
1744 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1745 
1746 	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
1747 }
1748 
1749 /**
1750  * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
1751  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1752  *
1753  * This function should only be called when the MAC address in
1754  * virtchnl_ether_addr is a valid unicast MAC
1755  */
1756 static bool
1757 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
1758 {
1759 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1760 
1761 	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
1762 }
1763 
1764 /**
1765  * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
1766  * @vf: VF to update
1767  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
1768  */
1769 static void
1770 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1771 {
1772 	u8 *mac_addr = vc_ether_addr->addr;
1773 
1774 	if (!is_valid_ether_addr(mac_addr))
1775 		return;
1776 
1777 	/* only allow legacy VF drivers to set the device and hardware MAC if it
1778 	 * is zero and allow new VF drivers to set the hardware MAC if the type
1779 	 * was correctly specified over VIRTCHNL
1780 	 */
1781 	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
1782 	     is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
1783 	    ice_is_vc_addr_primary(vc_ether_addr)) {
1784 		ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
1785 		ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
1786 	}
1787 
1788 	/* hardware and device MACs are already set, but its possible that the
1789 	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
1790 	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
1791 	 * away for the legacy VF driver case as it will be updated in the
1792 	 * delete flow for this case
1793 	 */
1794 	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
1795 		ether_addr_copy(vf->legacy_last_added_umac.addr,
1796 				mac_addr);
1797 		vf->legacy_last_added_umac.time_modified = jiffies;
1798 	}
1799 }
1800 
1801 /**
1802  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
1803  * @vf: pointer to the VF info
1804  * @vsi: pointer to the VF's VSI
1805  * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
1806  */
1807 static int
1808 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1809 		    struct virtchnl_ether_addr *vc_ether_addr)
1810 {
1811 	struct device *dev = ice_pf_to_dev(vf->pf);
1812 	u8 *mac_addr = vc_ether_addr->addr;
1813 	int ret;
1814 
1815 	/* device MAC already added */
1816 	if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
1817 		return 0;
1818 
1819 	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
1820 		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
1821 		return -EPERM;
1822 	}
1823 
1824 	ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1825 	if (ret == -EEXIST) {
1826 		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
1827 			vf->vf_id);
1828 		/* don't return since we might need to update
1829 		 * the primary MAC in ice_vfhw_mac_add() below
1830 		 */
1831 	} else if (ret) {
1832 		dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
1833 			mac_addr, vf->vf_id, ret);
1834 		return ret;
1835 	} else {
1836 		vf->num_mac++;
1837 	}
1838 
1839 	ice_vfhw_mac_add(vf, vc_ether_addr);
1840 
1841 	return ret;
1842 }
1843 
1844 /**
1845  * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
1846  * @last_added_umac: structure used to check expiration
1847  */
1848 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
1849 {
1850 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
1851 	return time_is_before_jiffies(last_added_umac->time_modified +
1852 				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
1853 }
1854 
1855 /**
1856  * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
1857  * @vf: VF to update
1858  * @vc_ether_addr: structure from VIRTCHNL with MAC to check
1859  *
1860  * only update cached hardware MAC for legacy VF drivers on delete
1861  * because we cannot guarantee order/type of MAC from the VF driver
1862  */
1863 static void
1864 ice_update_legacy_cached_mac(struct ice_vf *vf,
1865 			     struct virtchnl_ether_addr *vc_ether_addr)
1866 {
1867 	if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
1868 	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
1869 		return;
1870 
1871 	ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
1872 	ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
1873 }
1874 
1875 /**
1876  * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
1877  * @vf: VF to update
1878  * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
1879  */
1880 static void
1881 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1882 {
1883 	u8 *mac_addr = vc_ether_addr->addr;
1884 
1885 	if (!is_valid_ether_addr(mac_addr) ||
1886 	    !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
1887 		return;
1888 
1889 	/* allow the device MAC to be repopulated in the add flow and don't
1890 	 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
1891 	 * to be persistent on VM reboot and across driver unload/load, which
1892 	 * won't work if we clear the hardware MAC here
1893 	 */
1894 	eth_zero_addr(vf->dev_lan_addr.addr);
1895 
1896 	ice_update_legacy_cached_mac(vf, vc_ether_addr);
1897 }
1898 
1899 /**
1900  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
1901  * @vf: pointer to the VF info
1902  * @vsi: pointer to the VF's VSI
1903  * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
1904  */
1905 static int
1906 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1907 		    struct virtchnl_ether_addr *vc_ether_addr)
1908 {
1909 	struct device *dev = ice_pf_to_dev(vf->pf);
1910 	u8 *mac_addr = vc_ether_addr->addr;
1911 	int status;
1912 
1913 	if (!ice_can_vf_change_mac(vf) &&
1914 	    ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
1915 		return 0;
1916 
1917 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1918 	if (status == -ENOENT) {
1919 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
1920 			vf->vf_id);
1921 		return -ENOENT;
1922 	} else if (status) {
1923 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
1924 			mac_addr, vf->vf_id, status);
1925 		return -EIO;
1926 	}
1927 
1928 	ice_vfhw_mac_del(vf, vc_ether_addr);
1929 
1930 	vf->num_mac--;
1931 
1932 	return 0;
1933 }
1934 
1935 /**
1936  * ice_vc_handle_mac_addr_msg
1937  * @vf: pointer to the VF info
1938  * @msg: pointer to the msg buffer
1939  * @set: true if MAC filters are being set, false otherwise
1940  *
1941  * add guest MAC address filter
1942  */
1943 static int
1944 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
1945 {
1946 	int (*ice_vc_cfg_mac)
1947 		(struct ice_vf *vf, struct ice_vsi *vsi,
1948 		 struct virtchnl_ether_addr *virtchnl_ether_addr);
1949 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1950 	struct virtchnl_ether_addr_list *al =
1951 	    (struct virtchnl_ether_addr_list *)msg;
1952 	struct ice_pf *pf = vf->pf;
1953 	enum virtchnl_ops vc_op;
1954 	struct ice_vsi *vsi;
1955 	int i;
1956 
1957 	if (set) {
1958 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
1959 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
1960 	} else {
1961 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
1962 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
1963 	}
1964 
1965 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1966 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1967 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1968 		goto handle_mac_exit;
1969 	}
1970 
1971 	/* If this VF is not privileged, then we can't add more than a
1972 	 * limited number of addresses. Check to make sure that the
1973 	 * additions do not push us over the limit.
1974 	 */
1975 	if (set && !ice_is_vf_trusted(vf) &&
1976 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
1977 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
1978 			vf->vf_id);
1979 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1980 		goto handle_mac_exit;
1981 	}
1982 
1983 	vsi = ice_get_vf_vsi(vf);
1984 	if (!vsi) {
1985 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1986 		goto handle_mac_exit;
1987 	}
1988 
1989 	for (i = 0; i < al->num_elements; i++) {
1990 		u8 *mac_addr = al->list[i].addr;
1991 		int result;
1992 
1993 		if (is_broadcast_ether_addr(mac_addr) ||
1994 		    is_zero_ether_addr(mac_addr))
1995 			continue;
1996 
1997 		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
1998 		if (result == -EEXIST || result == -ENOENT) {
1999 			continue;
2000 		} else if (result) {
2001 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2002 			goto handle_mac_exit;
2003 		}
2004 	}
2005 
2006 handle_mac_exit:
2007 	/* send the response to the VF */
2008 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2009 }
2010 
2011 /**
2012  * ice_vc_add_mac_addr_msg
2013  * @vf: pointer to the VF info
2014  * @msg: pointer to the msg buffer
2015  *
2016  * add guest MAC address filter
2017  */
2018 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2019 {
2020 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
2021 }
2022 
2023 /**
2024  * ice_vc_del_mac_addr_msg
2025  * @vf: pointer to the VF info
2026  * @msg: pointer to the msg buffer
2027  *
2028  * remove guest MAC address filter
2029  */
2030 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2031 {
2032 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
2033 }
2034 
2035 /**
2036  * ice_vc_request_qs_msg
2037  * @vf: pointer to the VF info
2038  * @msg: pointer to the msg buffer
2039  *
2040  * VFs get a default number of queues but can use this message to request a
2041  * different number. If the request is successful, PF will reset the VF and
2042  * return 0. If unsuccessful, PF will send message informing VF of number of
2043  * available queue pairs via virtchnl message response to VF.
2044  */
2045 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2046 {
2047 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2048 	struct virtchnl_vf_res_request *vfres =
2049 		(struct virtchnl_vf_res_request *)msg;
2050 	u16 req_queues = vfres->num_queue_pairs;
2051 	struct ice_pf *pf = vf->pf;
2052 	u16 max_allowed_vf_queues;
2053 	u16 tx_rx_queue_left;
2054 	struct device *dev;
2055 	u16 cur_queues;
2056 
2057 	dev = ice_pf_to_dev(pf);
2058 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2059 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2060 		goto error_param;
2061 	}
2062 
2063 	cur_queues = vf->num_vf_qs;
2064 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2065 				 ice_get_avail_rxq_count(pf));
2066 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2067 	if (!req_queues) {
2068 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
2069 			vf->vf_id);
2070 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
2071 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
2072 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
2073 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
2074 	} else if (req_queues > cur_queues &&
2075 		   req_queues - cur_queues > tx_rx_queue_left) {
2076 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
2077 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2078 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2079 					       ICE_MAX_RSS_QS_PER_VF);
2080 	} else {
2081 		/* request is successful, then reset VF */
2082 		vf->num_req_qs = req_queues;
2083 		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
2084 		dev_info(dev, "VF %d granted request of %u queues.\n",
2085 			 vf->vf_id, req_queues);
2086 		return 0;
2087 	}
2088 
2089 error_param:
2090 	/* send the response to the VF */
2091 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2092 				     v_ret, (u8 *)vfres, sizeof(*vfres));
2093 }
2094 
2095 /**
2096  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2097  * @caps: VF driver negotiated capabilities
2098  *
2099  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
2100  */
2101 static bool ice_vf_vlan_offload_ena(u32 caps)
2102 {
2103 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
2104 }
2105 
2106 /**
2107  * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
2108  * @vf: VF used to determine if VLAN promiscuous config is allowed
2109  */
2110 static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
2111 {
2112 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2113 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
2114 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
2115 		return true;
2116 
2117 	return false;
2118 }
2119 
2120 /**
2121  * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
2122  * @vsi: VF's VSI used to enable VLAN promiscuous mode
2123  * @vlan: VLAN used to enable VLAN promiscuous
2124  *
2125  * This function should only be called if VLAN promiscuous mode is allowed,
2126  * which can be determined via ice_is_vlan_promisc_allowed().
2127  */
2128 static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2129 {
2130 	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2131 	int status;
2132 
2133 	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2134 					  vlan->vid);
2135 	if (status && status != -EEXIST)
2136 		return status;
2137 
2138 	return 0;
2139 }
2140 
2141 /**
2142  * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
2143  * @vsi: VF's VSI used to disable VLAN promiscuous mode for
2144  * @vlan: VLAN used to disable VLAN promiscuous
2145  *
2146  * This function should only be called if VLAN promiscuous mode is allowed,
2147  * which can be determined via ice_is_vlan_promisc_allowed().
2148  */
2149 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2150 {
2151 	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2152 	int status;
2153 
2154 	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2155 					    vlan->vid);
2156 	if (status && status != -ENOENT)
2157 		return status;
2158 
2159 	return 0;
2160 }
2161 
2162 /**
2163  * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
2164  * @vf: VF to check against
2165  * @vsi: VF's VSI
2166  *
2167  * If the VF is trusted then the VF is allowed to add as many VLANs as it
2168  * wants to, so return false.
2169  *
2170  * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
2171  * allowed VLANs for an untrusted VF. Return the result of this comparison.
2172  */
2173 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
2174 {
2175 	if (ice_is_vf_trusted(vf))
2176 		return false;
2177 
2178 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS	1
2179 	return ((ice_vsi_num_non_zero_vlans(vsi) +
2180 		ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
2181 }
2182 
2183 /**
2184  * ice_vc_process_vlan_msg
2185  * @vf: pointer to the VF info
2186  * @msg: pointer to the msg buffer
2187  * @add_v: Add VLAN if true, otherwise delete VLAN
2188  *
2189  * Process virtchnl op to add or remove programmed guest VLAN ID
2190  */
2191 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2192 {
2193 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2194 	struct virtchnl_vlan_filter_list *vfl =
2195 	    (struct virtchnl_vlan_filter_list *)msg;
2196 	struct ice_pf *pf = vf->pf;
2197 	bool vlan_promisc = false;
2198 	struct ice_vsi *vsi;
2199 	struct device *dev;
2200 	int status = 0;
2201 	int i;
2202 
2203 	dev = ice_pf_to_dev(pf);
2204 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2205 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2206 		goto error_param;
2207 	}
2208 
2209 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2210 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2211 		goto error_param;
2212 	}
2213 
2214 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2215 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2216 		goto error_param;
2217 	}
2218 
2219 	for (i = 0; i < vfl->num_elements; i++) {
2220 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
2221 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2222 			dev_err(dev, "invalid VF VLAN id %d\n",
2223 				vfl->vlan_id[i]);
2224 			goto error_param;
2225 		}
2226 	}
2227 
2228 	vsi = ice_get_vf_vsi(vf);
2229 	if (!vsi) {
2230 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2231 		goto error_param;
2232 	}
2233 
2234 	if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
2235 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2236 			 vf->vf_id);
2237 		/* There is no need to let VF know about being not trusted,
2238 		 * so we can just return success message here
2239 		 */
2240 		goto error_param;
2241 	}
2242 
2243 	/* in DVM a VF can add/delete inner VLAN filters when
2244 	 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
2245 	 */
2246 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
2247 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2248 		goto error_param;
2249 	}
2250 
2251 	/* in DVM VLAN promiscuous is based on the outer VLAN, which would be
2252 	 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
2253 	 * allow vlan_promisc = true in SVM and if no port VLAN is configured
2254 	 */
2255 	vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
2256 		!ice_is_dvm_ena(&pf->hw) &&
2257 		!ice_vf_is_port_vlan_ena(vf);
2258 
2259 	if (add_v) {
2260 		for (i = 0; i < vfl->num_elements; i++) {
2261 			u16 vid = vfl->vlan_id[i];
2262 			struct ice_vlan vlan;
2263 
2264 			if (ice_vf_has_max_vlans(vf, vsi)) {
2265 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2266 					 vf->vf_id);
2267 				/* There is no need to let VF know about being
2268 				 * not trusted, so we can just return success
2269 				 * message here as well.
2270 				 */
2271 				goto error_param;
2272 			}
2273 
2274 			/* we add VLAN 0 by default for each VF so we can enable
2275 			 * Tx VLAN anti-spoof without triggering MDD events so
2276 			 * we don't need to add it again here
2277 			 */
2278 			if (!vid)
2279 				continue;
2280 
2281 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2282 			status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
2283 			if (status) {
2284 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2285 				goto error_param;
2286 			}
2287 
2288 			/* Enable VLAN filtering on first non-zero VLAN */
2289 			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
2290 				if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
2291 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2292 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
2293 						vid, status);
2294 					goto error_param;
2295 				}
2296 			} else if (vlan_promisc) {
2297 				status = ice_vf_ena_vlan_promisc(vsi, &vlan);
2298 				if (status) {
2299 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2300 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
2301 						vid, status);
2302 				}
2303 			}
2304 		}
2305 	} else {
2306 		/* In case of non_trusted VF, number of VLAN elements passed
2307 		 * to PF for removal might be greater than number of VLANs
2308 		 * filter programmed for that VF - So, use actual number of
2309 		 * VLANS added earlier with add VLAN opcode. In order to avoid
2310 		 * removing VLAN that doesn't exist, which result to sending
2311 		 * erroneous failed message back to the VF
2312 		 */
2313 		int num_vf_vlan;
2314 
2315 		num_vf_vlan = vsi->num_vlan;
2316 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2317 			u16 vid = vfl->vlan_id[i];
2318 			struct ice_vlan vlan;
2319 
2320 			/* we add VLAN 0 by default for each VF so we can enable
2321 			 * Tx VLAN anti-spoof without triggering MDD events so
2322 			 * we don't want a VIRTCHNL request to remove it
2323 			 */
2324 			if (!vid)
2325 				continue;
2326 
2327 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2328 			status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
2329 			if (status) {
2330 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2331 				goto error_param;
2332 			}
2333 
2334 			/* Disable VLAN filtering when only VLAN 0 is left */
2335 			if (!ice_vsi_has_non_zero_vlans(vsi))
2336 				vsi->inner_vlan_ops.dis_rx_filtering(vsi);
2337 
2338 			if (vlan_promisc)
2339 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2340 		}
2341 	}
2342 
2343 error_param:
2344 	/* send the response to the VF */
2345 	if (add_v)
2346 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2347 					     NULL, 0);
2348 	else
2349 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2350 					     NULL, 0);
2351 }
2352 
2353 /**
2354  * ice_vc_add_vlan_msg
2355  * @vf: pointer to the VF info
2356  * @msg: pointer to the msg buffer
2357  *
2358  * Add and program guest VLAN ID
2359  */
2360 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2361 {
2362 	return ice_vc_process_vlan_msg(vf, msg, true);
2363 }
2364 
2365 /**
2366  * ice_vc_remove_vlan_msg
2367  * @vf: pointer to the VF info
2368  * @msg: pointer to the msg buffer
2369  *
2370  * remove programmed guest VLAN ID
2371  */
2372 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2373 {
2374 	return ice_vc_process_vlan_msg(vf, msg, false);
2375 }
2376 
2377 /**
2378  * ice_vc_ena_vlan_stripping
2379  * @vf: pointer to the VF info
2380  *
2381  * Enable VLAN header stripping for a given VF
2382  */
2383 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2384 {
2385 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2386 	struct ice_vsi *vsi;
2387 
2388 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2389 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2390 		goto error_param;
2391 	}
2392 
2393 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2394 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2395 		goto error_param;
2396 	}
2397 
2398 	vsi = ice_get_vf_vsi(vf);
2399 	if (!vsi) {
2400 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2401 		goto error_param;
2402 	}
2403 
2404 	if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
2405 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2406 
2407 error_param:
2408 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2409 				     v_ret, NULL, 0);
2410 }
2411 
2412 /**
2413  * ice_vc_dis_vlan_stripping
2414  * @vf: pointer to the VF info
2415  *
2416  * Disable VLAN header stripping for a given VF
2417  */
2418 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2419 {
2420 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2421 	struct ice_vsi *vsi;
2422 
2423 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2424 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2425 		goto error_param;
2426 	}
2427 
2428 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2429 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2430 		goto error_param;
2431 	}
2432 
2433 	vsi = ice_get_vf_vsi(vf);
2434 	if (!vsi) {
2435 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2436 		goto error_param;
2437 	}
2438 
2439 	if (vsi->inner_vlan_ops.dis_stripping(vsi))
2440 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2441 
2442 error_param:
2443 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2444 				     v_ret, NULL, 0);
2445 }
2446 
2447 /**
2448  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
2449  * @vf: VF to enable/disable VLAN stripping for on initialization
2450  *
2451  * Set the default for VLAN stripping based on whether a port VLAN is configured
2452  * and the current VLAN mode of the device.
2453  */
2454 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
2455 {
2456 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
2457 
2458 	if (!vsi)
2459 		return -EINVAL;
2460 
2461 	/* don't modify stripping if port VLAN is configured in SVM since the
2462 	 * port VLAN is based on the inner/single VLAN in SVM
2463 	 */
2464 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
2465 		return 0;
2466 
2467 	if (ice_vf_vlan_offload_ena(vf->driver_caps))
2468 		return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
2469 	else
2470 		return vsi->inner_vlan_ops.dis_stripping(vsi);
2471 }
2472 
2473 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
2474 {
2475 	if (vf->trusted)
2476 		return VLAN_N_VID;
2477 	else
2478 		return ICE_MAX_VLAN_PER_VF;
2479 }
2480 
2481 /**
2482  * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
2483  * @vf: VF that being checked for
2484  *
2485  * When the device is in double VLAN mode, check whether or not the outer VLAN
2486  * is allowed.
2487  */
2488 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
2489 {
2490 	if (ice_vf_is_port_vlan_ena(vf))
2491 		return true;
2492 
2493 	return false;
2494 }
2495 
2496 /**
2497  * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
2498  * @vf: VF that capabilities are being set for
2499  * @caps: VLAN capabilities to populate
2500  *
2501  * Determine VLAN capabilities support based on whether a port VLAN is
2502  * configured. If a port VLAN is configured then the VF should use the inner
2503  * filtering/offload capabilities since the port VLAN is using the outer VLAN
2504  * capabilies.
2505  */
2506 static void
2507 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2508 {
2509 	struct virtchnl_vlan_supported_caps *supported_caps;
2510 
2511 	if (ice_vf_outer_vlan_not_allowed(vf)) {
2512 		/* until support for inner VLAN filtering is added when a port
2513 		 * VLAN is configured, only support software offloaded inner
2514 		 * VLANs when a port VLAN is confgured in DVM
2515 		 */
2516 		supported_caps = &caps->filtering.filtering_support;
2517 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2518 
2519 		supported_caps = &caps->offloads.stripping_support;
2520 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2521 					VIRTCHNL_VLAN_TOGGLE |
2522 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2523 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2524 
2525 		supported_caps = &caps->offloads.insertion_support;
2526 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2527 					VIRTCHNL_VLAN_TOGGLE |
2528 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2529 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2530 
2531 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2532 		caps->offloads.ethertype_match =
2533 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2534 	} else {
2535 		supported_caps = &caps->filtering.filtering_support;
2536 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2537 		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2538 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2539 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2540 					VIRTCHNL_VLAN_ETHERTYPE_AND;
2541 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2542 						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2543 						 VIRTCHNL_VLAN_ETHERTYPE_9100;
2544 
2545 		supported_caps = &caps->offloads.stripping_support;
2546 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2547 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2548 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2549 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2550 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2551 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2552 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2553 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2554 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
2555 
2556 		supported_caps = &caps->offloads.insertion_support;
2557 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2558 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2559 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2560 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2561 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2562 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2563 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2564 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2565 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
2566 
2567 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2568 
2569 		caps->offloads.ethertype_match =
2570 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2571 	}
2572 
2573 	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2574 }
2575 
2576 /**
2577  * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
2578  * @vf: VF that capabilities are being set for
2579  * @caps: VLAN capabilities to populate
2580  *
2581  * Determine VLAN capabilities support based on whether a port VLAN is
2582  * configured. If a port VLAN is configured then the VF does not have any VLAN
2583  * filtering or offload capabilities since the port VLAN is using the inner VLAN
2584  * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
2585  * VLAN fitlering and offload capabilities.
2586  */
2587 static void
2588 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2589 {
2590 	struct virtchnl_vlan_supported_caps *supported_caps;
2591 
2592 	if (ice_vf_is_port_vlan_ena(vf)) {
2593 		supported_caps = &caps->filtering.filtering_support;
2594 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2595 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2596 
2597 		supported_caps = &caps->offloads.stripping_support;
2598 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2599 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2600 
2601 		supported_caps = &caps->offloads.insertion_support;
2602 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2603 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2604 
2605 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
2606 		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
2607 		caps->filtering.max_filters = 0;
2608 	} else {
2609 		supported_caps = &caps->filtering.filtering_support;
2610 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
2611 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2612 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2613 
2614 		supported_caps = &caps->offloads.stripping_support;
2615 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2616 					VIRTCHNL_VLAN_TOGGLE |
2617 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2618 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2619 
2620 		supported_caps = &caps->offloads.insertion_support;
2621 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2622 					VIRTCHNL_VLAN_TOGGLE |
2623 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2624 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2625 
2626 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2627 		caps->offloads.ethertype_match =
2628 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2629 		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2630 	}
2631 }
2632 
2633 /**
2634  * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
2635  * @vf: VF to determine VLAN capabilities for
2636  *
2637  * This will only be called if the VF and PF successfully negotiated
2638  * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2639  *
2640  * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
2641  * is configured or not.
2642  */
2643 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
2644 {
2645 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2646 	struct virtchnl_vlan_caps *caps = NULL;
2647 	int err, len = 0;
2648 
2649 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2650 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2651 		goto out;
2652 	}
2653 
2654 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2655 	if (!caps) {
2656 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2657 		goto out;
2658 	}
2659 	len = sizeof(*caps);
2660 
2661 	if (ice_is_dvm_ena(&vf->pf->hw))
2662 		ice_vc_set_dvm_caps(vf, caps);
2663 	else
2664 		ice_vc_set_svm_caps(vf, caps);
2665 
2666 	/* store negotiated caps to prevent invalid VF messages */
2667 	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
2668 
2669 out:
2670 	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
2671 				    v_ret, (u8 *)caps, len);
2672 	kfree(caps);
2673 	return err;
2674 }
2675 
2676 /**
2677  * ice_vc_validate_vlan_tpid - validate VLAN TPID
2678  * @filtering_caps: negotiated/supported VLAN filtering capabilities
2679  * @tpid: VLAN TPID used for validation
2680  *
2681  * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
2682  * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
2683  */
2684 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
2685 {
2686 	enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
2687 
2688 	switch (tpid) {
2689 	case ETH_P_8021Q:
2690 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
2691 		break;
2692 	case ETH_P_8021AD:
2693 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
2694 		break;
2695 	case ETH_P_QINQ1:
2696 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
2697 		break;
2698 	}
2699 
2700 	if (!(filtering_caps & vlan_ethertype))
2701 		return false;
2702 
2703 	return true;
2704 }
2705 
2706 /**
2707  * ice_vc_is_valid_vlan - validate the virtchnl_vlan
2708  * @vc_vlan: virtchnl_vlan to validate
2709  *
2710  * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return
2711  * false. Otherwise return true.
2712  */
2713 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
2714 {
2715 	if (!vc_vlan->tci || !vc_vlan->tpid)
2716 		return false;
2717 
2718 	return true;
2719 }
2720 
2721 /**
2722  * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
2723  * @vfc: negotiated/supported VLAN filtering capabilities
2724  * @vfl: VLAN filter list from VF to validate
2725  *
2726  * Validate all of the filters in the VLAN filter list from the VF. If any of
2727  * the checks fail then return false. Otherwise return true.
2728  */
2729 static bool
2730 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
2731 				 struct virtchnl_vlan_filter_list_v2 *vfl)
2732 {
2733 	u16 i;
2734 
2735 	if (!vfl->num_elements)
2736 		return false;
2737 
2738 	for (i = 0; i < vfl->num_elements; i++) {
2739 		struct virtchnl_vlan_supported_caps *filtering_support =
2740 			&vfc->filtering_support;
2741 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2742 		struct virtchnl_vlan *outer = &vlan_fltr->outer;
2743 		struct virtchnl_vlan *inner = &vlan_fltr->inner;
2744 
2745 		if ((ice_vc_is_valid_vlan(outer) &&
2746 		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
2747 		    (ice_vc_is_valid_vlan(inner) &&
2748 		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
2749 			return false;
2750 
2751 		if ((outer->tci_mask &&
2752 		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
2753 		    (inner->tci_mask &&
2754 		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
2755 			return false;
2756 
2757 		if (((outer->tci & VLAN_PRIO_MASK) &&
2758 		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
2759 		    ((inner->tci & VLAN_PRIO_MASK) &&
2760 		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
2761 			return false;
2762 
2763 		if ((ice_vc_is_valid_vlan(outer) &&
2764 		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
2765 						outer->tpid)) ||
2766 		    (ice_vc_is_valid_vlan(inner) &&
2767 		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
2768 						inner->tpid)))
2769 			return false;
2770 	}
2771 
2772 	return true;
2773 }
2774 
2775 /**
2776  * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
2777  * @vc_vlan: struct virtchnl_vlan to transform
2778  */
2779 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
2780 {
2781 	struct ice_vlan vlan = { 0 };
2782 
2783 	vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2784 	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
2785 	vlan.tpid = vc_vlan->tpid;
2786 
2787 	return vlan;
2788 }
2789 
2790 /**
2791  * ice_vc_vlan_action - action to perform on the virthcnl_vlan
2792  * @vsi: VF's VSI used to perform the action
2793  * @vlan_action: function to perform the action with (i.e. add/del)
2794  * @vlan: VLAN filter to perform the action with
2795  */
2796 static int
2797 ice_vc_vlan_action(struct ice_vsi *vsi,
2798 		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
2799 		   struct ice_vlan *vlan)
2800 {
2801 	int err;
2802 
2803 	err = vlan_action(vsi, vlan);
2804 	if (err)
2805 		return err;
2806 
2807 	return 0;
2808 }
2809 
2810 /**
2811  * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
2812  * @vf: VF used to delete the VLAN(s)
2813  * @vsi: VF's VSI used to delete the VLAN(s)
2814  * @vfl: virthchnl filter list used to delete the filters
2815  */
2816 static int
2817 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2818 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2819 {
2820 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2821 	int err;
2822 	u16 i;
2823 
2824 	for (i = 0; i < vfl->num_elements; i++) {
2825 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2826 		struct virtchnl_vlan *vc_vlan;
2827 
2828 		vc_vlan = &vlan_fltr->outer;
2829 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2830 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2831 
2832 			err = ice_vc_vlan_action(vsi,
2833 						 vsi->outer_vlan_ops.del_vlan,
2834 						 &vlan);
2835 			if (err)
2836 				return err;
2837 
2838 			if (vlan_promisc)
2839 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2840 		}
2841 
2842 		vc_vlan = &vlan_fltr->inner;
2843 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2844 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2845 
2846 			err = ice_vc_vlan_action(vsi,
2847 						 vsi->inner_vlan_ops.del_vlan,
2848 						 &vlan);
2849 			if (err)
2850 				return err;
2851 
2852 			/* no support for VLAN promiscuous on inner VLAN unless
2853 			 * we are in Single VLAN Mode (SVM)
2854 			 */
2855 			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
2856 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2857 		}
2858 	}
2859 
2860 	return 0;
2861 }
2862 
2863 /**
2864  * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
2865  * @vf: VF the message was received from
2866  * @msg: message received from the VF
2867  */
2868 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2869 {
2870 	struct virtchnl_vlan_filter_list_v2 *vfl =
2871 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2872 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2873 	struct ice_vsi *vsi;
2874 
2875 	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
2876 					      vfl)) {
2877 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2878 		goto out;
2879 	}
2880 
2881 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2882 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2883 		goto out;
2884 	}
2885 
2886 	vsi = ice_get_vf_vsi(vf);
2887 	if (!vsi) {
2888 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2889 		goto out;
2890 	}
2891 
2892 	if (ice_vc_del_vlans(vf, vsi, vfl))
2893 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2894 
2895 out:
2896 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
2897 				     0);
2898 }
2899 
2900 /**
2901  * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
2902  * @vf: VF used to add the VLAN(s)
2903  * @vsi: VF's VSI used to add the VLAN(s)
2904  * @vfl: virthchnl filter list used to add the filters
2905  */
2906 static int
2907 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2908 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2909 {
2910 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2911 	int err;
2912 	u16 i;
2913 
2914 	for (i = 0; i < vfl->num_elements; i++) {
2915 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2916 		struct virtchnl_vlan *vc_vlan;
2917 
2918 		vc_vlan = &vlan_fltr->outer;
2919 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2920 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2921 
2922 			err = ice_vc_vlan_action(vsi,
2923 						 vsi->outer_vlan_ops.add_vlan,
2924 						 &vlan);
2925 			if (err)
2926 				return err;
2927 
2928 			if (vlan_promisc) {
2929 				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2930 				if (err)
2931 					return err;
2932 			}
2933 		}
2934 
2935 		vc_vlan = &vlan_fltr->inner;
2936 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2937 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2938 
2939 			err = ice_vc_vlan_action(vsi,
2940 						 vsi->inner_vlan_ops.add_vlan,
2941 						 &vlan);
2942 			if (err)
2943 				return err;
2944 
2945 			/* no support for VLAN promiscuous on inner VLAN unless
2946 			 * we are in Single VLAN Mode (SVM)
2947 			 */
2948 			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
2949 				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2950 				if (err)
2951 					return err;
2952 			}
2953 		}
2954 	}
2955 
2956 	return 0;
2957 }
2958 
2959 /**
2960  * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
2961  * @vsi: VF VSI used to get number of existing VLAN filters
2962  * @vfc: negotiated/supported VLAN filtering capabilities
2963  * @vfl: VLAN filter list from VF to validate
2964  *
2965  * Validate all of the filters in the VLAN filter list from the VF during the
2966  * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
2967  * Otherwise return true.
2968  */
2969 static bool
2970 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
2971 				     struct virtchnl_vlan_filtering_caps *vfc,
2972 				     struct virtchnl_vlan_filter_list_v2 *vfl)
2973 {
2974 	u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
2975 
2976 	if (num_requested_filters > vfc->max_filters)
2977 		return false;
2978 
2979 	return ice_vc_validate_vlan_filter_list(vfc, vfl);
2980 }
2981 
2982 /**
2983  * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
2984  * @vf: VF the message was received from
2985  * @msg: message received from the VF
2986  */
2987 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2988 {
2989 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2990 	struct virtchnl_vlan_filter_list_v2 *vfl =
2991 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2992 	struct ice_vsi *vsi;
2993 
2994 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2995 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2996 		goto out;
2997 	}
2998 
2999 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
3000 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3001 		goto out;
3002 	}
3003 
3004 	vsi = ice_get_vf_vsi(vf);
3005 	if (!vsi) {
3006 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3007 		goto out;
3008 	}
3009 
3010 	if (!ice_vc_validate_add_vlan_filter_list(vsi,
3011 						  &vf->vlan_v2_caps.filtering,
3012 						  vfl)) {
3013 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3014 		goto out;
3015 	}
3016 
3017 	if (ice_vc_add_vlans(vf, vsi, vfl))
3018 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3019 
3020 out:
3021 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
3022 				     0);
3023 }
3024 
3025 /**
3026  * ice_vc_valid_vlan_setting - validate VLAN setting
3027  * @negotiated_settings: negotiated VLAN settings during VF init
3028  * @ethertype_setting: ethertype(s) requested for the VLAN setting
3029  */
3030 static bool
3031 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
3032 {
3033 	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
3034 		return false;
3035 
3036 	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
3037 	 * VIRTHCNL_VLAN_ETHERTYPE_AND is not negotiated/supported
3038 	 */
3039 	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
3040 	    hweight32(ethertype_setting) > 1)
3041 		return false;
3042 
3043 	/* ability to modify the VLAN setting was not negotiated */
3044 	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
3045 		return false;
3046 
3047 	return true;
3048 }
3049 
3050 /**
3051  * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
3052  * @caps: negotiated VLAN settings during VF init
3053  * @msg: message to validate
3054  *
3055  * Used to validate any VLAN virtchnl message sent as a
3056  * virtchnl_vlan_setting structure. Validates the message against the
3057  * negotiated/supported caps during VF driver init.
3058  */
3059 static bool
3060 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
3061 			      struct virtchnl_vlan_setting *msg)
3062 {
3063 	if ((!msg->outer_ethertype_setting &&
3064 	     !msg->inner_ethertype_setting) ||
3065 	    (!caps->outer && !caps->inner))
3066 		return false;
3067 
3068 	if (msg->outer_ethertype_setting &&
3069 	    !ice_vc_valid_vlan_setting(caps->outer,
3070 				       msg->outer_ethertype_setting))
3071 		return false;
3072 
3073 	if (msg->inner_ethertype_setting &&
3074 	    !ice_vc_valid_vlan_setting(caps->inner,
3075 				       msg->inner_ethertype_setting))
3076 		return false;
3077 
3078 	return true;
3079 }
3080 
3081 /**
3082  * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
3083  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
3084  * @tpid: VLAN TPID to populate
3085  */
3086 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
3087 {
3088 	switch (ethertype_setting) {
3089 	case VIRTCHNL_VLAN_ETHERTYPE_8100:
3090 		*tpid = ETH_P_8021Q;
3091 		break;
3092 	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
3093 		*tpid = ETH_P_8021AD;
3094 		break;
3095 	case VIRTCHNL_VLAN_ETHERTYPE_9100:
3096 		*tpid = ETH_P_QINQ1;
3097 		break;
3098 	default:
3099 		*tpid = 0;
3100 		return -EINVAL;
3101 	}
3102 
3103 	return 0;
3104 }
3105 
3106 /**
3107  * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
3108  * @vsi: VF's VSI used to enable the VLAN offload
3109  * @ena_offload: function used to enable the VLAN offload
3110  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
3111  */
3112 static int
3113 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
3114 			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
3115 			u32 ethertype_setting)
3116 {
3117 	u16 tpid;
3118 	int err;
3119 
3120 	err = ice_vc_get_tpid(ethertype_setting, &tpid);
3121 	if (err)
3122 		return err;
3123 
3124 	err = ena_offload(vsi, tpid);
3125 	if (err)
3126 		return err;
3127 
3128 	return 0;
3129 }
3130 
3131 #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX	3
3132 #define ICE_L2TSEL_BIT_OFFSET		23
3133 enum ice_l2tsel {
3134 	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
3135 	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
3136 };
3137 
3138 /**
3139  * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
3140  * @vsi: VSI used to update l2tsel on
3141  * @l2tsel: l2tsel setting requested
3142  *
3143  * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
3144  * This will modify which descriptor field the first offloaded VLAN will be
3145  * stripped into.
3146  */
3147 static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
3148 {
3149 	struct ice_hw *hw = &vsi->back->hw;
3150 	u32 l2tsel_bit;
3151 	int i;
3152 
3153 	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
3154 		l2tsel_bit = 0;
3155 	else
3156 		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
3157 
3158 	for (i = 0; i < vsi->alloc_rxq; i++) {
3159 		u16 pfq = vsi->rxq_map[i];
3160 		u32 qrx_context_offset;
3161 		u32 regval;
3162 
3163 		qrx_context_offset =
3164 			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
3165 
3166 		regval = rd32(hw, qrx_context_offset);
3167 		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
3168 		regval |= l2tsel_bit;
3169 		wr32(hw, qrx_context_offset, regval);
3170 	}
3171 }
3172 
3173 /**
3174  * ice_vc_ena_vlan_stripping_v2_msg
3175  * @vf: VF the message was received from
3176  * @msg: message received from the VF
3177  *
3178  * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
3179  */
3180 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3181 {
3182 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3183 	struct virtchnl_vlan_supported_caps *stripping_support;
3184 	struct virtchnl_vlan_setting *strip_msg =
3185 		(struct virtchnl_vlan_setting *)msg;
3186 	u32 ethertype_setting;
3187 	struct ice_vsi *vsi;
3188 
3189 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3190 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3191 		goto out;
3192 	}
3193 
3194 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3195 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3196 		goto out;
3197 	}
3198 
3199 	vsi = ice_get_vf_vsi(vf);
3200 	if (!vsi) {
3201 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3202 		goto out;
3203 	}
3204 
3205 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3206 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3207 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3208 		goto out;
3209 	}
3210 
3211 	ethertype_setting = strip_msg->outer_ethertype_setting;
3212 	if (ethertype_setting) {
3213 		if (ice_vc_ena_vlan_offload(vsi,
3214 					    vsi->outer_vlan_ops.ena_stripping,
3215 					    ethertype_setting)) {
3216 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3217 			goto out;
3218 		} else {
3219 			enum ice_l2tsel l2tsel =
3220 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
3221 
3222 			/* PF tells the VF that the outer VLAN tag is always
3223 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3224 			 * inner is always extracted to
3225 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3226 			 * support outer stripping so the first tag always ends
3227 			 * up in L2TAG2_2ND and the second/inner tag, if
3228 			 * enabled, is extracted in L2TAG1.
3229 			 */
3230 			ice_vsi_update_l2tsel(vsi, l2tsel);
3231 		}
3232 	}
3233 
3234 	ethertype_setting = strip_msg->inner_ethertype_setting;
3235 	if (ethertype_setting &&
3236 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
3237 				    ethertype_setting)) {
3238 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3239 		goto out;
3240 	}
3241 
3242 out:
3243 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
3244 				     v_ret, NULL, 0);
3245 }
3246 
3247 /**
3248  * ice_vc_dis_vlan_stripping_v2_msg
3249  * @vf: VF the message was received from
3250  * @msg: message received from the VF
3251  *
3252  * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
3253  */
3254 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3255 {
3256 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3257 	struct virtchnl_vlan_supported_caps *stripping_support;
3258 	struct virtchnl_vlan_setting *strip_msg =
3259 		(struct virtchnl_vlan_setting *)msg;
3260 	u32 ethertype_setting;
3261 	struct ice_vsi *vsi;
3262 
3263 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3264 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3265 		goto out;
3266 	}
3267 
3268 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3269 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3270 		goto out;
3271 	}
3272 
3273 	vsi = ice_get_vf_vsi(vf);
3274 	if (!vsi) {
3275 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3276 		goto out;
3277 	}
3278 
3279 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3280 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3281 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3282 		goto out;
3283 	}
3284 
3285 	ethertype_setting = strip_msg->outer_ethertype_setting;
3286 	if (ethertype_setting) {
3287 		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
3288 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3289 			goto out;
3290 		} else {
3291 			enum ice_l2tsel l2tsel =
3292 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
3293 
3294 			/* PF tells the VF that the outer VLAN tag is always
3295 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3296 			 * inner is always extracted to
3297 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3298 			 * support inner stripping while outer stripping is
3299 			 * disabled so that the first and only tag is extracted
3300 			 * in L2TAG1.
3301 			 */
3302 			ice_vsi_update_l2tsel(vsi, l2tsel);
3303 		}
3304 	}
3305 
3306 	ethertype_setting = strip_msg->inner_ethertype_setting;
3307 	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
3308 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3309 		goto out;
3310 	}
3311 
3312 out:
3313 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
3314 				     v_ret, NULL, 0);
3315 }
3316 
3317 /**
3318  * ice_vc_ena_vlan_insertion_v2_msg
3319  * @vf: VF the message was received from
3320  * @msg: message received from the VF
3321  *
3322  * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
3323  */
3324 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3325 {
3326 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3327 	struct virtchnl_vlan_supported_caps *insertion_support;
3328 	struct virtchnl_vlan_setting *insertion_msg =
3329 		(struct virtchnl_vlan_setting *)msg;
3330 	u32 ethertype_setting;
3331 	struct ice_vsi *vsi;
3332 
3333 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3334 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3335 		goto out;
3336 	}
3337 
3338 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3339 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3340 		goto out;
3341 	}
3342 
3343 	vsi = ice_get_vf_vsi(vf);
3344 	if (!vsi) {
3345 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3346 		goto out;
3347 	}
3348 
3349 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3350 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3351 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3352 		goto out;
3353 	}
3354 
3355 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3356 	if (ethertype_setting &&
3357 	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
3358 				    ethertype_setting)) {
3359 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3360 		goto out;
3361 	}
3362 
3363 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3364 	if (ethertype_setting &&
3365 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
3366 				    ethertype_setting)) {
3367 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3368 		goto out;
3369 	}
3370 
3371 out:
3372 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
3373 				     v_ret, NULL, 0);
3374 }
3375 
3376 /**
3377  * ice_vc_dis_vlan_insertion_v2_msg
3378  * @vf: VF the message was received from
3379  * @msg: message received from the VF
3380  *
3381  * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3382  */
3383 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3384 {
3385 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3386 	struct virtchnl_vlan_supported_caps *insertion_support;
3387 	struct virtchnl_vlan_setting *insertion_msg =
3388 		(struct virtchnl_vlan_setting *)msg;
3389 	u32 ethertype_setting;
3390 	struct ice_vsi *vsi;
3391 
3392 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3393 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3394 		goto out;
3395 	}
3396 
3397 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3398 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3399 		goto out;
3400 	}
3401 
3402 	vsi = ice_get_vf_vsi(vf);
3403 	if (!vsi) {
3404 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3405 		goto out;
3406 	}
3407 
3408 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3409 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3410 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3411 		goto out;
3412 	}
3413 
3414 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3415 	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
3416 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3417 		goto out;
3418 	}
3419 
3420 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3421 	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
3422 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3423 		goto out;
3424 	}
3425 
3426 out:
3427 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
3428 				     v_ret, NULL, 0);
3429 }
3430 
3431 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
3432 	.get_ver_msg = ice_vc_get_ver_msg,
3433 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3434 	.reset_vf = ice_vc_reset_vf_msg,
3435 	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
3436 	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
3437 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3438 	.ena_qs_msg = ice_vc_ena_qs_msg,
3439 	.dis_qs_msg = ice_vc_dis_qs_msg,
3440 	.request_qs_msg = ice_vc_request_qs_msg,
3441 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3442 	.config_rss_key = ice_vc_config_rss_key,
3443 	.config_rss_lut = ice_vc_config_rss_lut,
3444 	.get_stats_msg = ice_vc_get_stats_msg,
3445 	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
3446 	.add_vlan_msg = ice_vc_add_vlan_msg,
3447 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3448 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3449 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3450 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3451 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3452 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3453 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3454 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3455 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3456 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3457 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3458 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3459 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3460 };
3461 
3462 /**
3463  * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
3464  * @vf: the VF to switch ops
3465  */
3466 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
3467 {
3468 	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
3469 }
3470 
3471 /**
3472  * ice_vc_repr_add_mac
3473  * @vf: pointer to VF
3474  * @msg: virtchannel message
3475  *
3476  * When port representors are created, we do not add MAC rule
3477  * to firmware, we store it so that PF could report same
3478  * MAC as VF.
3479  */
3480 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
3481 {
3482 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3483 	struct virtchnl_ether_addr_list *al =
3484 	    (struct virtchnl_ether_addr_list *)msg;
3485 	struct ice_vsi *vsi;
3486 	struct ice_pf *pf;
3487 	int i;
3488 
3489 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3490 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3491 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3492 		goto handle_mac_exit;
3493 	}
3494 
3495 	pf = vf->pf;
3496 
3497 	vsi = ice_get_vf_vsi(vf);
3498 	if (!vsi) {
3499 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3500 		goto handle_mac_exit;
3501 	}
3502 
3503 	for (i = 0; i < al->num_elements; i++) {
3504 		u8 *mac_addr = al->list[i].addr;
3505 		int result;
3506 
3507 		if (!is_unicast_ether_addr(mac_addr) ||
3508 		    ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
3509 			continue;
3510 
3511 		if (vf->pf_set_mac) {
3512 			dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3513 			v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3514 			goto handle_mac_exit;
3515 		}
3516 
3517 		result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
3518 		if (result) {
3519 			dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
3520 				mac_addr, vf->vf_id, result);
3521 			goto handle_mac_exit;
3522 		}
3523 
3524 		ice_vfhw_mac_add(vf, &al->list[i]);
3525 		vf->num_mac++;
3526 		break;
3527 	}
3528 
3529 handle_mac_exit:
3530 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3531 				     v_ret, NULL, 0);
3532 }
3533 
3534 /**
3535  * ice_vc_repr_del_mac - response with success for deleting MAC
3536  * @vf: pointer to VF
3537  * @msg: virtchannel message
3538  *
3539  * Respond with success to not break normal VF flow.
3540  * For legacy VF driver try to update cached MAC address.
3541  */
3542 static int
3543 ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
3544 {
3545 	struct virtchnl_ether_addr_list *al =
3546 		(struct virtchnl_ether_addr_list *)msg;
3547 
3548 	ice_update_legacy_cached_mac(vf, &al->list[0]);
3549 
3550 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3551 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3552 }
3553 
3554 static int
3555 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3556 {
3557 	dev_dbg(ice_pf_to_dev(vf->pf),
3558 		"Can't config promiscuous mode in switchdev mode for VF %d\n",
3559 		vf->vf_id);
3560 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3561 				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3562 				     NULL, 0);
3563 }
3564 
3565 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3566 	.get_ver_msg = ice_vc_get_ver_msg,
3567 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3568 	.reset_vf = ice_vc_reset_vf_msg,
3569 	.add_mac_addr_msg = ice_vc_repr_add_mac,
3570 	.del_mac_addr_msg = ice_vc_repr_del_mac,
3571 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3572 	.ena_qs_msg = ice_vc_ena_qs_msg,
3573 	.dis_qs_msg = ice_vc_dis_qs_msg,
3574 	.request_qs_msg = ice_vc_request_qs_msg,
3575 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3576 	.config_rss_key = ice_vc_config_rss_key,
3577 	.config_rss_lut = ice_vc_config_rss_lut,
3578 	.get_stats_msg = ice_vc_get_stats_msg,
3579 	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3580 	.add_vlan_msg = ice_vc_add_vlan_msg,
3581 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3582 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3583 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3584 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3585 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3586 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3587 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3588 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3589 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3590 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3591 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3592 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3593 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3594 };
3595 
3596 /**
3597  * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3598  * @vf: the VF to switch ops
3599  */
3600 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3601 {
3602 	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3603 }
3604 
3605 /**
3606  * ice_vc_process_vf_msg - Process request from VF
3607  * @pf: pointer to the PF structure
3608  * @event: pointer to the AQ event
3609  *
3610  * called from the common asq/arq handler to
3611  * process request from VF
3612  */
3613 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3614 {
3615 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3616 	s16 vf_id = le16_to_cpu(event->desc.retval);
3617 	const struct ice_virtchnl_ops *ops;
3618 	u16 msglen = event->msg_len;
3619 	u8 *msg = event->msg_buf;
3620 	struct ice_vf *vf = NULL;
3621 	struct device *dev;
3622 	int err = 0;
3623 
3624 	dev = ice_pf_to_dev(pf);
3625 
3626 	vf = ice_get_vf_by_id(pf, vf_id);
3627 	if (!vf) {
3628 		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
3629 			vf_id, v_opcode, msglen);
3630 		return;
3631 	}
3632 
3633 	mutex_lock(&vf->cfg_lock);
3634 
3635 	/* Check if VF is disabled. */
3636 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3637 		err = -EPERM;
3638 		goto error_handler;
3639 	}
3640 
3641 	ops = vf->virtchnl_ops;
3642 
3643 	/* Perform basic checks on the msg */
3644 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3645 	if (err) {
3646 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3647 			err = -EPERM;
3648 		else
3649 			err = -EINVAL;
3650 	}
3651 
3652 error_handler:
3653 	if (err) {
3654 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3655 				      NULL, 0);
3656 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3657 			vf_id, v_opcode, msglen, err);
3658 		goto finish;
3659 	}
3660 
3661 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
3662 		ice_vc_send_msg_to_vf(vf, v_opcode,
3663 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
3664 				      0);
3665 		goto finish;
3666 	}
3667 
3668 	switch (v_opcode) {
3669 	case VIRTCHNL_OP_VERSION:
3670 		err = ops->get_ver_msg(vf, msg);
3671 		break;
3672 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3673 		err = ops->get_vf_res_msg(vf, msg);
3674 		if (ice_vf_init_vlan_stripping(vf))
3675 			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
3676 				vf->vf_id);
3677 		ice_vc_notify_vf_link_state(vf);
3678 		break;
3679 	case VIRTCHNL_OP_RESET_VF:
3680 		ops->reset_vf(vf);
3681 		break;
3682 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3683 		err = ops->add_mac_addr_msg(vf, msg);
3684 		break;
3685 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3686 		err = ops->del_mac_addr_msg(vf, msg);
3687 		break;
3688 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3689 		err = ops->cfg_qs_msg(vf, msg);
3690 		break;
3691 	case VIRTCHNL_OP_ENABLE_QUEUES:
3692 		err = ops->ena_qs_msg(vf, msg);
3693 		ice_vc_notify_vf_link_state(vf);
3694 		break;
3695 	case VIRTCHNL_OP_DISABLE_QUEUES:
3696 		err = ops->dis_qs_msg(vf, msg);
3697 		break;
3698 	case VIRTCHNL_OP_REQUEST_QUEUES:
3699 		err = ops->request_qs_msg(vf, msg);
3700 		break;
3701 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3702 		err = ops->cfg_irq_map_msg(vf, msg);
3703 		break;
3704 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3705 		err = ops->config_rss_key(vf, msg);
3706 		break;
3707 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3708 		err = ops->config_rss_lut(vf, msg);
3709 		break;
3710 	case VIRTCHNL_OP_GET_STATS:
3711 		err = ops->get_stats_msg(vf, msg);
3712 		break;
3713 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3714 		err = ops->cfg_promiscuous_mode_msg(vf, msg);
3715 		break;
3716 	case VIRTCHNL_OP_ADD_VLAN:
3717 		err = ops->add_vlan_msg(vf, msg);
3718 		break;
3719 	case VIRTCHNL_OP_DEL_VLAN:
3720 		err = ops->remove_vlan_msg(vf, msg);
3721 		break;
3722 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3723 		err = ops->ena_vlan_stripping(vf);
3724 		break;
3725 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3726 		err = ops->dis_vlan_stripping(vf);
3727 		break;
3728 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
3729 		err = ops->add_fdir_fltr_msg(vf, msg);
3730 		break;
3731 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
3732 		err = ops->del_fdir_fltr_msg(vf, msg);
3733 		break;
3734 	case VIRTCHNL_OP_ADD_RSS_CFG:
3735 		err = ops->handle_rss_cfg_msg(vf, msg, true);
3736 		break;
3737 	case VIRTCHNL_OP_DEL_RSS_CFG:
3738 		err = ops->handle_rss_cfg_msg(vf, msg, false);
3739 		break;
3740 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
3741 		err = ops->get_offload_vlan_v2_caps(vf);
3742 		break;
3743 	case VIRTCHNL_OP_ADD_VLAN_V2:
3744 		err = ops->add_vlan_v2_msg(vf, msg);
3745 		break;
3746 	case VIRTCHNL_OP_DEL_VLAN_V2:
3747 		err = ops->remove_vlan_v2_msg(vf, msg);
3748 		break;
3749 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
3750 		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
3751 		break;
3752 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
3753 		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
3754 		break;
3755 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
3756 		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
3757 		break;
3758 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
3759 		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
3760 		break;
3761 	case VIRTCHNL_OP_UNKNOWN:
3762 	default:
3763 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3764 			vf_id);
3765 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3766 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3767 					    NULL, 0);
3768 		break;
3769 	}
3770 	if (err) {
3771 		/* Helper function cares less about error return values here
3772 		 * as it is busy with pending work.
3773 		 */
3774 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3775 			 vf_id, v_opcode, err);
3776 	}
3777 
3778 finish:
3779 	mutex_unlock(&vf->cfg_lock);
3780 	ice_put_vf(vf);
3781 }
3782