1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3 
4 #include "ice_virtchnl.h"
5 #include "ice_vf_lib_private.h"
6 #include "ice.h"
7 #include "ice_base.h"
8 #include "ice_lib.h"
9 #include "ice_fltr.h"
10 #include "ice_virtchnl_allowlist.h"
11 #include "ice_vf_vsi_vlan_ops.h"
12 #include "ice_vlan.h"
13 #include "ice_flex_pipe.h"
14 #include "ice_dcb_lib.h"
15 
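/* Select the bit for a virtchnl protocol header field within its header's
 * field selector bitmap; the upper bits of the field enum encode the header
 * type and are masked off here.
 */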
16 #define FIELD_SELECTOR(proto_hdr_field) \
17 		BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
18 
19 struct ice_vc_hdr_match_type {
20 	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
21 	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
22 };
23 
24 static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
25 	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
26 	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
27 	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
28 	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
29 	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
30 					ICE_FLOW_SEG_HDR_IPV_OTHER},
31 	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
32 					ICE_FLOW_SEG_HDR_IPV_OTHER},
33 	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
34 	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
35 	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
36 	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
37 	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
38 	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
39 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
40 					ICE_FLOW_SEG_HDR_GTPU_DWN},
41 	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
42 					ICE_FLOW_SEG_HDR_GTPU_UP},
43 	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
44 	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
45 	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
46 	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
47 };
48 
49 struct ice_vc_hash_field_match_type {
50 	u32 vc_hdr;		/* virtchnl headers
51 				 * (VIRTCHNL_PROTO_HDR_XXX)
52 				 */
53 	u32 vc_hash_field;	/* virtchnl hash fields selector
54 				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
55 				 */
56 	u64 ice_hash_field;	/* ice hash fields
57 				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
58 				 */
59 };
60 
61 static const struct
62 ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
63 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
64 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
65 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
66 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
67 	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
68 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
69 		ICE_FLOW_HASH_ETH},
70 	{VIRTCHNL_PROTO_HDR_ETH,
71 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
72 		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
73 	{VIRTCHNL_PROTO_HDR_S_VLAN,
74 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
75 		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
76 	{VIRTCHNL_PROTO_HDR_C_VLAN,
77 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
78 		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
79 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
80 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
81 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
82 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
83 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
84 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
85 		ICE_FLOW_HASH_IPV4},
86 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
87 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
88 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
89 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
90 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
91 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
92 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
93 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
94 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
95 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
96 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
97 		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
98 	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
99 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
100 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
101 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
102 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
103 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
104 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
105 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
106 		ICE_FLOW_HASH_IPV6},
107 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
108 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
109 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
110 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
111 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
112 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
113 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
114 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
115 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
116 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
117 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
118 		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
119 	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
120 		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
121 	{VIRTCHNL_PROTO_HDR_TCP,
122 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
123 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
124 	{VIRTCHNL_PROTO_HDR_TCP,
125 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
126 		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
127 	{VIRTCHNL_PROTO_HDR_TCP,
128 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
129 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
130 		ICE_FLOW_HASH_TCP_PORT},
131 	{VIRTCHNL_PROTO_HDR_UDP,
132 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
133 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
134 	{VIRTCHNL_PROTO_HDR_UDP,
135 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
136 		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
137 	{VIRTCHNL_PROTO_HDR_UDP,
138 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
139 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
140 		ICE_FLOW_HASH_UDP_PORT},
141 	{VIRTCHNL_PROTO_HDR_SCTP,
142 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
143 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
144 	{VIRTCHNL_PROTO_HDR_SCTP,
145 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
146 		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
147 	{VIRTCHNL_PROTO_HDR_SCTP,
148 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
149 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
150 		ICE_FLOW_HASH_SCTP_PORT},
151 	{VIRTCHNL_PROTO_HDR_PPPOE,
152 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
153 		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
154 	{VIRTCHNL_PROTO_HDR_GTPU_IP,
155 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
156 		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
157 	{VIRTCHNL_PROTO_HDR_L2TPV3,
158 		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
159 		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
160 	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
161 		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
162 	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
163 		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
164 	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
165 		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
166 };
167 
168 /**
169  * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
170  * @pf: pointer to the PF structure
171  * @v_opcode: operation code
172  * @v_retval: return value
173  * @msg: pointer to the msg buffer
174  * @msglen: msg length
175  */
176 static void
177 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
178 		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
179 {
180 	struct ice_hw *hw = &pf->hw;
181 	struct ice_vf *vf;
182 	unsigned int bkt;
183 
184 	mutex_lock(&pf->vfs.table_lock);
185 	ice_for_each_vf(pf, bkt, vf) {
		/* Not all VFs are enabled so skip the ones that are not */
187 		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
188 		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
189 			continue;
190 
191 		/* Ignore return value on purpose - a given VF may fail, but
192 		 * we need to keep going and send to all of them
193 		 */
194 		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
195 				      msglen, NULL);
196 	}
197 	mutex_unlock(&pf->vfs.table_lock);
198 }
199 
200 /**
201  * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
202  * @vf: pointer to the VF structure
203  * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
204  * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
205  * @link_up: whether or not to set the link up/down
206  */
207 static void
208 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
209 		 int ice_link_speed, bool link_up)
210 {
211 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
212 		pfe->event_data.link_event_adv.link_status = link_up;
213 		/* Speed in Mbps */
214 		pfe->event_data.link_event_adv.link_speed =
215 			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
216 	} else {
217 		pfe->event_data.link_event.link_status = link_up;
218 		/* Legacy method for virtchnl link speeds */
219 		pfe->event_data.link_event.link_speed =
220 			(enum virtchnl_link_speed)
221 			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
222 	}
223 }
224 
225 /**
226  * ice_vc_notify_vf_link_state - Inform a VF of link status
227  * @vf: pointer to the VF structure
228  *
229  * send a link status message to a single VF
230  */
231 void ice_vc_notify_vf_link_state(struct ice_vf *vf)
232 {
233 	struct virtchnl_pf_event pfe = { 0 };
234 	struct ice_hw *hw = &vf->pf->hw;
235 
236 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
237 	pfe.severity = PF_EVENT_SEVERITY_INFO;
238 
239 	if (ice_is_vf_link_up(vf))
240 		ice_set_pfe_link(vf, &pfe,
241 				 hw->port_info->phy.link_info.link_speed, true);
242 	else
243 		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
244 
245 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
246 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
247 			      sizeof(pfe), NULL);
248 }
249 
250 /**
251  * ice_vc_notify_link_state - Inform all VFs on a PF of link status
252  * @pf: pointer to the PF structure
253  */
254 void ice_vc_notify_link_state(struct ice_pf *pf)
255 {
256 	struct ice_vf *vf;
257 	unsigned int bkt;
258 
259 	mutex_lock(&pf->vfs.table_lock);
260 	ice_for_each_vf(pf, bkt, vf)
261 		ice_vc_notify_vf_link_state(vf);
262 	mutex_unlock(&pf->vfs.table_lock);
263 }
264 
265 /**
266  * ice_vc_notify_reset - Send pending reset message to all VFs
267  * @pf: pointer to the PF structure
268  *
269  * indicate a pending reset to all VFs on a given PF
270  */
271 void ice_vc_notify_reset(struct ice_pf *pf)
272 {
273 	struct virtchnl_pf_event pfe;
274 
275 	if (!ice_has_vfs(pf))
276 		return;
277 
278 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
279 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
280 	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
281 			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
282 }
283 
284 /**
285  * ice_vc_send_msg_to_vf - Send message to VF
286  * @vf: pointer to the VF info
287  * @v_opcode: virtual channel opcode
288  * @v_retval: virtual channel return value
289  * @msg: pointer to the msg buffer
290  * @msglen: msg length
291  *
292  * send msg to VF
293  */
294 int
295 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
296 		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
297 {
298 	struct device *dev;
299 	struct ice_pf *pf;
300 	int aq_ret;
301 
302 	pf = vf->pf;
303 	dev = ice_pf_to_dev(pf);
304 
305 	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
306 				       msg, msglen, NULL);
307 	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
308 		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
309 			 vf->vf_id, aq_ret,
310 			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
311 		return -EIO;
312 	}
313 
314 	return 0;
315 }
316 
317 /**
318  * ice_vc_get_ver_msg
319  * @vf: pointer to the VF info
320  * @msg: pointer to the msg buffer
321  *
322  * called from the VF to request the API version used by the PF
323  */
324 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
325 {
326 	struct virtchnl_version_info info = {
327 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
328 	};
329 
330 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
331 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
332 	if (VF_IS_V10(&vf->vf_ver))
333 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
334 
335 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
336 				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
337 				     sizeof(struct virtchnl_version_info));
338 }
339 
340 /**
341  * ice_vc_get_max_frame_size - get max frame size allowed for VF
342  * @vf: VF used to determine max frame size
343  *
344  * Max frame size is determined based on the current port's max frame size and
345  * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN, so the PF needs to account for this both in max frame
 * size checks and when sending the max frame size to the VF.
348  */
349 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
350 {
351 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
352 	u16 max_frame_size;
353 
354 	max_frame_size = pi->phy.link_info.max_frame_size;
355 
356 	if (ice_vf_is_port_vlan_ena(vf))
357 		max_frame_size -= VLAN_HLEN;
358 
359 	return max_frame_size;
360 }
361 
362 /**
363  * ice_vc_get_vlan_caps
364  * @hw: pointer to the hw
365  * @vf: pointer to the VF info
366  * @vsi: pointer to the VSI
367  * @driver_caps: current driver caps
368  *
 * Return 0 if no VLAN capabilities are supported, otherwise return the VLAN
 * capabilities value
370  */
371 static u32
372 ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
373 		     u32 driver_caps)
374 {
375 	if (ice_is_eswitch_mode_switchdev(vf->pf))
376 		/* In switchdev setting VLAN from VF isn't supported */
377 		return 0;
378 
379 	if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
380 		/* VLAN offloads based on current device configuration */
381 		return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
382 	} else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
		/* allow the VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN explicitly
		 * only for these two conditions, which amount to guest VLAN
		 * filtering and offloads being based on the inner VLAN or the
		 * inner/single VLAN respectively, and don't allow the VF to
		 * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other case
		 */
389 		if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
390 			return VIRTCHNL_VF_OFFLOAD_VLAN;
391 		} else if (!ice_is_dvm_ena(hw) &&
392 			   !ice_vf_is_port_vlan_ena(vf)) {
393 			/* configure backward compatible support for VFs that
394 			 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
395 			 * configured in SVM, and no port VLAN is configured
396 			 */
397 			ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
398 			return VIRTCHNL_VF_OFFLOAD_VLAN;
399 		} else if (ice_is_dvm_ena(hw)) {
400 			/* configure software offloaded VLAN support when DVM
401 			 * is enabled, but no port VLAN is enabled
402 			 */
403 			ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
404 		}
405 	}
406 
407 	return 0;
408 }
409 
410 /**
411  * ice_vc_get_vf_res_msg
412  * @vf: pointer to the VF info
413  * @msg: pointer to the msg buffer
414  *
415  * called from the VF to request its resources
416  */
417 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
418 {
419 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
420 	struct virtchnl_vf_resource *vfres = NULL;
421 	struct ice_hw *hw = &vf->pf->hw;
422 	struct ice_vsi *vsi;
423 	int len = 0;
424 	int ret;
425 
426 	if (ice_check_vf_init(vf)) {
427 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
428 		goto err;
429 	}
430 
431 	len = sizeof(struct virtchnl_vf_resource);
432 
433 	vfres = kzalloc(len, GFP_KERNEL);
434 	if (!vfres) {
435 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
436 		len = 0;
437 		goto err;
438 	}
439 	if (VF_IS_V11(&vf->vf_ver))
440 		vf->driver_caps = *(u32 *)msg;
441 	else
442 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
443 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
444 				  VIRTCHNL_VF_OFFLOAD_VLAN;
445 
446 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
447 	vsi = ice_get_vf_vsi(vf);
448 	if (!vsi) {
449 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
450 		goto err;
451 	}
452 
453 	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
454 						    vf->driver_caps);
455 
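	/* prefer PF-managed RSS when the VF requests it; otherwise fall back
	 * to AQ-based or register-based RSS configuration
	 */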
456 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
457 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
458 	} else {
459 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
460 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
461 		else
462 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
463 	}
464 
465 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
466 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
467 
468 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
469 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
470 
471 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
472 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
473 
474 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
475 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
476 
477 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
478 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
479 
480 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
481 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
482 
483 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
484 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
485 
486 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
487 		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
488 
489 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
490 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
491 
492 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
493 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
494 
495 	vfres->num_vsis = 1;
	/* Tx and Rx queues are equal for a VF */
497 	vfres->num_queue_pairs = vsi->num_txq;
498 	vfres->max_vectors = vf->pf->vfs.num_msix_per;
499 	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
500 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
501 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
502 
503 	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
504 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
505 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
506 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
507 			vf->hw_lan_addr.addr);
508 
509 	/* match guest capabilities */
510 	vf->driver_caps = vfres->vf_cap_flags;
511 
512 	ice_vc_set_caps_allowlist(vf);
513 	ice_vc_set_working_allowlist(vf);
514 
515 	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
516 
517 err:
518 	/* send the response back to the VF */
519 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
520 				    (u8 *)vfres, len);
521 
522 	kfree(vfres);
523 	return ret;
524 }
525 
526 /**
527  * ice_vc_reset_vf_msg
528  * @vf: pointer to the VF info
529  *
 * called from the VF to reset itself; unlike other virtchnl messages,
 * the PF driver doesn't send a response back to the VF
533  */
534 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
535 {
536 	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
537 		ice_reset_vf(vf, 0);
538 }
539 
540 /**
541  * ice_vc_isvalid_vsi_id
542  * @vf: pointer to the VF info
543  * @vsi_id: VF relative VSI ID
544  *
545  * check for the valid VSI ID
546  */
547 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
548 {
549 	struct ice_pf *pf = vf->pf;
550 	struct ice_vsi *vsi;
551 
552 	vsi = ice_find_vsi(pf, vsi_id);
553 
554 	return (vsi && (vsi->vf == vf));
555 }
556 
557 /**
558  * ice_vc_isvalid_q_id
559  * @vf: pointer to the VF info
560  * @vsi_id: VSI ID
561  * @qid: VSI relative queue ID
562  *
563  * check for the valid queue ID
564  */
565 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
566 {
567 	struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should always be equal for a VF VSI */
569 	return (vsi && (qid < vsi->alloc_txq));
570 }
571 
572 /**
573  * ice_vc_isvalid_ring_len
574  * @ring_len: length of ring
575  *
 * check for a valid ring count: it must be zero or a multiple of
 * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
578  */
579 static bool ice_vc_isvalid_ring_len(u16 ring_len)
580 {
581 	return ring_len == 0 ||
582 	       (ring_len >= ICE_MIN_NUM_DESC &&
583 		ring_len <= ICE_MAX_NUM_DESC &&
584 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
585 }
586 
587 /**
588  * ice_vc_validate_pattern
589  * @vf: pointer to the VF info
590  * @proto: virtchnl protocol headers
591  *
 * validate whether the pattern is supported.
593  *
594  * Return: true on success, false on error.
595  */
596 bool
597 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
598 {
599 	bool is_ipv4 = false;
600 	bool is_ipv6 = false;
601 	bool is_udp = false;
602 	u16 ptype = -1;
603 	int i = 0;
604 
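	/* walk the protocol header chain and derive the resulting packet type;
	 * tunnel and security headers (GTPU, L2TPv3, ESP, AH, PFCP) end the
	 * walk since they fully determine the final ptype
	 */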
605 	while (i < proto->count &&
606 	       proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
607 		switch (proto->proto_hdr[i].type) {
608 		case VIRTCHNL_PROTO_HDR_ETH:
609 			ptype = ICE_PTYPE_MAC_PAY;
610 			break;
611 		case VIRTCHNL_PROTO_HDR_IPV4:
612 			ptype = ICE_PTYPE_IPV4_PAY;
613 			is_ipv4 = true;
614 			break;
615 		case VIRTCHNL_PROTO_HDR_IPV6:
616 			ptype = ICE_PTYPE_IPV6_PAY;
617 			is_ipv6 = true;
618 			break;
619 		case VIRTCHNL_PROTO_HDR_UDP:
620 			if (is_ipv4)
621 				ptype = ICE_PTYPE_IPV4_UDP_PAY;
622 			else if (is_ipv6)
623 				ptype = ICE_PTYPE_IPV6_UDP_PAY;
624 			is_udp = true;
625 			break;
626 		case VIRTCHNL_PROTO_HDR_TCP:
627 			if (is_ipv4)
628 				ptype = ICE_PTYPE_IPV4_TCP_PAY;
629 			else if (is_ipv6)
630 				ptype = ICE_PTYPE_IPV6_TCP_PAY;
631 			break;
632 		case VIRTCHNL_PROTO_HDR_SCTP:
633 			if (is_ipv4)
634 				ptype = ICE_PTYPE_IPV4_SCTP_PAY;
635 			else if (is_ipv6)
636 				ptype = ICE_PTYPE_IPV6_SCTP_PAY;
637 			break;
638 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
639 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
640 			if (is_ipv4)
641 				ptype = ICE_MAC_IPV4_GTPU;
642 			else if (is_ipv6)
643 				ptype = ICE_MAC_IPV6_GTPU;
644 			goto out;
645 		case VIRTCHNL_PROTO_HDR_L2TPV3:
646 			if (is_ipv4)
647 				ptype = ICE_MAC_IPV4_L2TPV3;
648 			else if (is_ipv6)
649 				ptype = ICE_MAC_IPV6_L2TPV3;
650 			goto out;
651 		case VIRTCHNL_PROTO_HDR_ESP:
652 			if (is_ipv4)
653 				ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
654 						ICE_MAC_IPV4_ESP;
655 			else if (is_ipv6)
656 				ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
657 						ICE_MAC_IPV6_ESP;
658 			goto out;
659 		case VIRTCHNL_PROTO_HDR_AH:
660 			if (is_ipv4)
661 				ptype = ICE_MAC_IPV4_AH;
662 			else if (is_ipv6)
663 				ptype = ICE_MAC_IPV6_AH;
664 			goto out;
665 		case VIRTCHNL_PROTO_HDR_PFCP:
666 			if (is_ipv4)
667 				ptype = ICE_MAC_IPV4_PFCP_SESSION;
668 			else if (is_ipv6)
669 				ptype = ICE_MAC_IPV6_PFCP_SESSION;
670 			goto out;
671 		default:
672 			break;
673 		}
674 		i++;
675 	}
676 
677 out:
678 	return ice_hw_ptype_ena(&vf->pf->hw, ptype);
679 }
680 
681 /**
682  * ice_vc_parse_rss_cfg - parses hash fields and headers from
683  * a specific virtchnl RSS cfg
684  * @hw: pointer to the hardware
685  * @rss_cfg: pointer to the virtchnl RSS cfg
686  * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
687  * to configure
688  * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
689  *
690  * Return true if all the protocol header and hash fields in the RSS cfg could
691  * be parsed, else return false
692  *
 * This function parses the virtchnl RSS cfg into the intended
 * hash fields and headers for the RSS configuration
695  */
696 static bool
697 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
698 		     u32 *addl_hdrs, u64 *hash_flds)
699 {
700 	const struct ice_vc_hash_field_match_type *hf_list;
701 	const struct ice_vc_hdr_match_type *hdr_list;
702 	int i, hf_list_len, hdr_list_len;
703 
704 	hf_list = ice_vc_hash_field_list;
705 	hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
706 	hdr_list = ice_vc_hdr_list;
707 	hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
708 
709 	for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
710 		struct virtchnl_proto_hdr *proto_hdr =
711 					&rss_cfg->proto_hdrs.proto_hdr[i];
712 		bool hdr_found = false;
713 		int j;
714 
715 		/* Find matched ice headers according to virtchnl headers. */
716 		for (j = 0; j < hdr_list_len; j++) {
717 			struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
718 
719 			if (proto_hdr->type == hdr_map.vc_hdr) {
720 				*addl_hdrs |= hdr_map.ice_hdr;
721 				hdr_found = true;
722 			}
723 		}
724 
725 		if (!hdr_found)
726 			return false;
727 
728 		/* Find matched ice hash fields according to
729 		 * virtchnl hash fields.
730 		 */
731 		for (j = 0; j < hf_list_len; j++) {
732 			struct ice_vc_hash_field_match_type hf_map = hf_list[j];
733 
734 			if (proto_hdr->type == hf_map.vc_hdr &&
735 			    proto_hdr->field_selector == hf_map.vc_hash_field) {
736 				*hash_flds |= hf_map.ice_hash_field;
737 				break;
738 			}
739 		}
740 	}
741 
742 	return true;
743 }
744 
745 /**
746  * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
747  * RSS offloads
748  * @caps: VF driver negotiated capabilities
749  *
750  * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
751  * else return false
752  */
753 static bool ice_vf_adv_rss_offload_ena(u32 caps)
754 {
755 	return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
756 }
757 
758 /**
759  * ice_vc_handle_rss_cfg
760  * @vf: pointer to the VF info
761  * @msg: pointer to the message buffer
762  * @add: add a RSS config if true, otherwise delete a RSS config
763  *
764  * This function adds/deletes a RSS config
765  */
766 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
767 {
768 	u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
769 	struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
770 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
771 	struct device *dev = ice_pf_to_dev(vf->pf);
772 	struct ice_hw *hw = &vf->pf->hw;
773 	struct ice_vsi *vsi;
774 
775 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
776 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
777 			vf->vf_id);
778 		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
779 		goto error_param;
780 	}
781 
782 	if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
783 		dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
784 			vf->vf_id);
785 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
786 		goto error_param;
787 	}
788 
789 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
790 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
791 		goto error_param;
792 	}
793 
794 	if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
795 	    rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
796 	    rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
797 		dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
798 			vf->vf_id);
799 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
800 		goto error_param;
801 	}
802 
803 	vsi = ice_get_vf_vsi(vf);
804 	if (!vsi) {
805 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
806 		goto error_param;
807 	}
808 
809 	if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
810 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
811 		goto error_param;
812 	}
813 
814 	if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
815 		struct ice_vsi_ctx *ctx;
816 		u8 lut_type, hash_type;
817 		int status;
818 
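		/* the simple XOR RSS algorithm is selected by rewriting the
		 * VSI's queueing option section: use the per-VSI LUT and XOR
		 * hashing when adding this config, and fall back to Toeplitz
		 * hashing when deleting it
		 */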
819 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
820 		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
821 				ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
822 
823 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
824 		if (!ctx) {
825 			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
826 			goto error_param;
827 		}
828 
829 		ctx->info.q_opt_rss = ((lut_type <<
830 					ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
831 				       ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
832 				       (hash_type &
833 					ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
834 
835 		/* Preserve existing queueing option setting */
836 		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
837 					  ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
838 		ctx->info.q_opt_tc = vsi->info.q_opt_tc;
839 		ctx->info.q_opt_flags = vsi->info.q_opt_rss;
840 
841 		ctx->info.valid_sections =
842 				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
843 
844 		status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
845 		if (status) {
846 			dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
847 				status, ice_aq_str(hw->adminq.sq_last_status));
848 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
849 		} else {
850 			vsi->info.q_opt_rss = ctx->info.q_opt_rss;
851 		}
852 
853 		kfree(ctx);
854 	} else {
855 		u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
856 		u64 hash_flds = ICE_HASH_INVALID;
857 
858 		if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
859 					  &hash_flds)) {
860 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
861 			goto error_param;
862 		}
863 
864 		if (add) {
865 			if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
866 					    addl_hdrs)) {
867 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
868 				dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
869 					vsi->vsi_num, v_ret);
870 			}
871 		} else {
872 			int status;
873 
874 			status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
875 						 addl_hdrs);
876 			/* We just ignore -ENOENT, because if two configurations
			 * share the same profile, removing one of them actually
878 			 * removes both, since the profile is deleted.
879 			 */
880 			if (status && status != -ENOENT) {
881 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
882 				dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
883 					vf->vf_id, status);
884 			}
885 		}
886 	}
887 
888 error_param:
889 	return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
890 }
891 
892 /**
893  * ice_vc_config_rss_key
894  * @vf: pointer to the VF info
895  * @msg: pointer to the msg buffer
896  *
897  * Configure the VF's RSS key
898  */
899 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
900 {
901 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
902 	struct virtchnl_rss_key *vrk =
903 		(struct virtchnl_rss_key *)msg;
904 	struct ice_vsi *vsi;
905 
906 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
907 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
908 		goto error_param;
909 	}
910 
911 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
912 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
913 		goto error_param;
914 	}
915 
916 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
917 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
918 		goto error_param;
919 	}
920 
921 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
922 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
923 		goto error_param;
924 	}
925 
926 	vsi = ice_get_vf_vsi(vf);
927 	if (!vsi) {
928 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
929 		goto error_param;
930 	}
931 
932 	if (ice_set_rss_key(vsi, vrk->key))
933 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
934 error_param:
935 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
936 				     NULL, 0);
937 }
938 
939 /**
940  * ice_vc_config_rss_lut
941  * @vf: pointer to the VF info
942  * @msg: pointer to the msg buffer
943  *
944  * Configure the VF's RSS LUT
945  */
946 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
947 {
948 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
949 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
950 	struct ice_vsi *vsi;
951 
952 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
953 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
954 		goto error_param;
955 	}
956 
957 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
958 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
959 		goto error_param;
960 	}
961 
962 	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
963 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
964 		goto error_param;
965 	}
966 
967 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
968 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
969 		goto error_param;
970 	}
971 
972 	vsi = ice_get_vf_vsi(vf);
973 	if (!vsi) {
974 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
975 		goto error_param;
976 	}
977 
978 	if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
979 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
980 error_param:
981 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
982 				     NULL, 0);
983 }
984 
985 /**
986  * ice_vc_cfg_promiscuous_mode_msg
987  * @vf: pointer to the VF info
988  * @msg: pointer to the msg buffer
989  *
990  * called from the VF to configure VF VSIs promiscuous mode
991  */
992 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
993 {
994 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
995 	bool rm_promisc, alluni = false, allmulti = false;
996 	struct virtchnl_promisc_info *info =
997 	    (struct virtchnl_promisc_info *)msg;
998 	struct ice_vsi_vlan_ops *vlan_ops;
999 	int mcast_err = 0, ucast_err = 0;
1000 	struct ice_pf *pf = vf->pf;
1001 	struct ice_vsi *vsi;
1002 	u8 mcast_m, ucast_m;
1003 	struct device *dev;
1004 	int ret = 0;
1005 
1006 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1007 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1008 		goto error_param;
1009 	}
1010 
1011 	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
1012 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1013 		goto error_param;
1014 	}
1015 
1016 	vsi = ice_get_vf_vsi(vf);
1017 	if (!vsi) {
1018 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1019 		goto error_param;
1020 	}
1021 
1022 	dev = ice_pf_to_dev(pf);
1023 	if (!ice_is_vf_trusted(vf)) {
1024 		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
1025 			vf->vf_id);
1026 		/* Leave v_ret alone, lie to the VF on purpose. */
1027 		goto error_param;
1028 	}
1029 
1030 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
1031 		alluni = true;
1032 
1033 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1034 		allmulti = true;
1035 
1036 	rm_promisc = !allmulti && !alluni;
1037 
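	/* VLAN Rx filtering (pruning) is only re-enabled once the VF leaves
	 * all promiscuous modes; while any promiscuous mode is requested it
	 * is disabled so VLAN tagged frames are not pruned
	 */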
1038 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1039 	if (rm_promisc)
1040 		ret = vlan_ops->ena_rx_filtering(vsi);
1041 	else
1042 		ret = vlan_ops->dis_rx_filtering(vsi);
1043 	if (ret) {
1044 		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
1045 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1046 		goto error_param;
1047 	}
1048 
1049 	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
1050 
1051 	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
1052 		if (alluni) {
1053 			/* in this case we're turning on promiscuous mode */
1054 			ret = ice_set_dflt_vsi(vsi);
1055 		} else {
1056 			/* in this case we're turning off promiscuous mode */
1057 			if (ice_is_dflt_vsi_in_use(vsi->port_info))
1058 				ret = ice_clear_dflt_vsi(vsi);
1059 		}
1060 
1061 		/* in this case we're turning on/off only
1062 		 * allmulticast
1063 		 */
1064 		if (allmulti)
1065 			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
1066 		else
1067 			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
1068 
1069 		if (ret) {
1070 			dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
1071 				vf->vf_id, ret);
1072 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1073 			goto error_param;
1074 		}
1075 	} else {
1076 		if (alluni)
1077 			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
1078 		else
1079 			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
1080 
1081 		if (allmulti)
1082 			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
1083 		else
1084 			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
1085 
1086 		if (ucast_err || mcast_err)
1087 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1088 	}
1089 
1090 	if (!mcast_err) {
1091 		if (allmulti &&
1092 		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
1093 			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
1094 				 vf->vf_id);
1095 		else if (!allmulti &&
1096 			 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
1097 					    vf->vf_states))
1098 			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
1099 				 vf->vf_id);
1100 	} else {
1101 		dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
1102 			vf->vf_id, mcast_err);
1103 	}
1104 
1105 	if (!ucast_err) {
1106 		if (alluni &&
1107 		    !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
1108 			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
1109 				 vf->vf_id);
1110 		else if (!alluni &&
1111 			 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
1112 					    vf->vf_states))
1113 			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
1114 				 vf->vf_id);
1115 	} else {
1116 		dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
1117 			vf->vf_id, ucast_err);
1118 	}
1119 
1120 error_param:
1121 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1122 				     v_ret, NULL, 0);
1123 }
1124 
1125 /**
1126  * ice_vc_get_stats_msg
1127  * @vf: pointer to the VF info
1128  * @msg: pointer to the msg buffer
1129  *
1130  * called from the VF to get VSI stats
1131  */
1132 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1133 {
1134 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1135 	struct virtchnl_queue_select *vqs =
1136 		(struct virtchnl_queue_select *)msg;
1137 	struct ice_eth_stats stats = { 0 };
1138 	struct ice_vsi *vsi;
1139 
1140 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1141 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1142 		goto error_param;
1143 	}
1144 
1145 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1146 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1147 		goto error_param;
1148 	}
1149 
1150 	vsi = ice_get_vf_vsi(vf);
1151 	if (!vsi) {
1152 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1153 		goto error_param;
1154 	}
1155 
1156 	ice_update_eth_stats(vsi);
1157 
1158 	stats = vsi->eth_stats;
1159 
1160 error_param:
1161 	/* send the response to the VF */
1162 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1163 				     (u8 *)&stats, sizeof(stats));
1164 }
1165 
1166 /**
1167  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
1168  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
1169  *
1170  * Return true on successful validation, else false
1171  */
1172 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
1173 {
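	/* at least one queue must be requested and neither bitmap may have
	 * bits set beyond the per-VF maximum of ICE_MAX_RSS_QS_PER_VF queues
	 */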
1174 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
1175 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
1176 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
1177 		return false;
1178 
1179 	return true;
1180 }
1181 
1182 /**
1183  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
1184  * @vsi: VSI of the VF to configure
1185  * @q_idx: VF queue index used to determine the queue in the PF's space
1186  */
1187 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
1188 {
1189 	struct ice_hw *hw = &vsi->back->hw;
1190 	u32 pfq = vsi->txq_map[q_idx];
1191 	u32 reg;
1192 
1193 	reg = rd32(hw, QINT_TQCTL(pfq));
1194 
1195 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
1196 	 * this is most likely a poll mode VF driver, so don't enable an
1197 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
1198 	 */
1199 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
1200 		return;
1201 
1202 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
1203 }
1204 
1205 /**
 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
1207  * @vsi: VSI of the VF to configure
1208  * @q_idx: VF queue index used to determine the queue in the PF's space
1209  */
1210 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
1211 {
1212 	struct ice_hw *hw = &vsi->back->hw;
1213 	u32 pfq = vsi->rxq_map[q_idx];
1214 	u32 reg;
1215 
1216 	reg = rd32(hw, QINT_RQCTL(pfq));
1217 
1218 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
1219 	 * this is most likely a poll mode VF driver, so don't enable an
1220 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
1221 	 */
1222 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
1223 		return;
1224 
1225 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
1226 }
1227 
1228 /**
1229  * ice_vc_ena_qs_msg
1230  * @vf: pointer to the VF info
1231  * @msg: pointer to the msg buffer
1232  *
1233  * called from the VF to enable all or specific queue(s)
1234  */
1235 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1236 {
1237 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1238 	struct virtchnl_queue_select *vqs =
1239 	    (struct virtchnl_queue_select *)msg;
1240 	struct ice_vsi *vsi;
1241 	unsigned long q_map;
1242 	u16 vf_q_id;
1243 
1244 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1245 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1246 		goto error_param;
1247 	}
1248 
1249 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1250 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1251 		goto error_param;
1252 	}
1253 
1254 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
1255 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1256 		goto error_param;
1257 	}
1258 
1259 	vsi = ice_get_vf_vsi(vf);
1260 	if (!vsi) {
1261 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1262 		goto error_param;
1263 	}
1264 
1265 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
1266 	 * Tx queue group list was configured and the context bits were
1267 	 * programmed using ice_vsi_cfg_txqs
1268 	 */
1269 	q_map = vqs->rx_queues;
1270 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1271 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1272 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1273 			goto error_param;
1274 		}
1275 
1276 		/* Skip queue if enabled */
1277 		if (test_bit(vf_q_id, vf->rxq_ena))
1278 			continue;
1279 
1280 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
1281 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
1282 				vf_q_id, vsi->vsi_num);
1283 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1284 			goto error_param;
1285 		}
1286 
1287 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
1288 		set_bit(vf_q_id, vf->rxq_ena);
1289 	}
1290 
1291 	q_map = vqs->tx_queues;
1292 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1293 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1294 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1295 			goto error_param;
1296 		}
1297 
1298 		/* Skip queue if enabled */
1299 		if (test_bit(vf_q_id, vf->txq_ena))
1300 			continue;
1301 
1302 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
1303 		set_bit(vf_q_id, vf->txq_ena);
1304 	}
1305 
1306 	/* Set flag to indicate that queues are enabled */
1307 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1308 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1309 
1310 error_param:
1311 	/* send the response to the VF */
1312 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1313 				     NULL, 0);
1314 }
1315 
1316 /**
1317  * ice_vf_vsi_dis_single_txq - disable a single Tx queue
1318  * @vf: VF to disable queue for
1319  * @vsi: VSI for the VF
1320  * @q_id: VF relative (0-based) queue ID
1321  *
1322  * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
1323  * disabled then clear q_id bit in the enabled queues bitmap and return
1324  * success. Otherwise return error.
1325  */
1326 static int
1327 ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
1328 {
1329 	struct ice_txq_meta txq_meta = { 0 };
1330 	struct ice_tx_ring *ring;
1331 	int err;
1332 
1333 	if (!test_bit(q_id, vf->txq_ena))
1334 		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
1335 			q_id, vsi->vsi_num);
1336 
1337 	ring = vsi->tx_rings[q_id];
1338 	if (!ring)
1339 		return -EINVAL;
1340 
1341 	ice_fill_txq_meta(vsi, ring, &txq_meta);
1342 
1343 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
1344 	if (err) {
1345 		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
1346 			q_id, vsi->vsi_num);
1347 		return err;
1348 	}
1349 
1350 	/* Clear enabled queues flag */
1351 	clear_bit(q_id, vf->txq_ena);
1352 
1353 	return 0;
1354 }
1355 
1356 /**
1357  * ice_vc_dis_qs_msg
1358  * @vf: pointer to the VF info
1359  * @msg: pointer to the msg buffer
1360  *
1361  * called from the VF to disable all or specific queue(s)
1362  */
1363 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
1364 {
1365 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1366 	struct virtchnl_queue_select *vqs =
1367 	    (struct virtchnl_queue_select *)msg;
1368 	struct ice_vsi *vsi;
1369 	unsigned long q_map;
1370 	u16 vf_q_id;
1371 
1372 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
1373 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
1374 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1375 		goto error_param;
1376 	}
1377 
1378 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1379 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1380 		goto error_param;
1381 	}
1382 
1383 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
1384 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1385 		goto error_param;
1386 	}
1387 
1388 	vsi = ice_get_vf_vsi(vf);
1389 	if (!vsi) {
1390 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1391 		goto error_param;
1392 	}
1393 
1394 	if (vqs->tx_queues) {
1395 		q_map = vqs->tx_queues;
1396 
1397 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1398 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1399 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1400 				goto error_param;
1401 			}
1402 
1403 			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
1404 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1405 				goto error_param;
1406 			}
1407 		}
1408 	}
1409 
1410 	q_map = vqs->rx_queues;
1411 	/* speed up Rx queue disable by batching them if possible */
1412 	if (q_map &&
1413 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
1414 		if (ice_vsi_stop_all_rx_rings(vsi)) {
1415 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
1416 				vsi->vsi_num);
1417 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1418 			goto error_param;
1419 		}
1420 
1421 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
1422 	} else if (q_map) {
1423 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1424 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1425 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1426 				goto error_param;
1427 			}
1428 
1429 			/* Skip queue if not enabled */
1430 			if (!test_bit(vf_q_id, vf->rxq_ena))
1431 				continue;
1432 
1433 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
1434 						     true)) {
1435 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
1436 					vf_q_id, vsi->vsi_num);
1437 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1438 				goto error_param;
1439 			}
1440 
1441 			/* Clear enabled queues flag */
1442 			clear_bit(vf_q_id, vf->rxq_ena);
1443 		}
1444 	}
1445 
1446 	/* Clear enabled queues flag */
1447 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
1448 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1449 
1450 error_param:
1451 	/* send the response to the VF */
1452 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1453 				     NULL, 0);
1454 }
1455 
1456 /**
1457  * ice_cfg_interrupt
1458  * @vf: pointer to the VF info
1459  * @vsi: the VSI being configured
1460  * @vector_id: vector ID
1461  * @map: vector map for mapping vectors to queues
 * @q_vector: structure for interrupt vector
 *
 * configure the IRQ to queue map
1464  */
1465 static int
1466 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
1467 		  struct virtchnl_vector_map *map,
1468 		  struct ice_q_vector *q_vector)
1469 {
1470 	u16 vsi_q_id, vsi_q_id_idx;
1471 	unsigned long qmap;
1472 
1473 	q_vector->num_ring_rx = 0;
1474 	q_vector->num_ring_tx = 0;
1475 
1476 	qmap = map->rxq_map;
1477 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
1478 		vsi_q_id = vsi_q_id_idx;
1479 
1480 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
1481 			return VIRTCHNL_STATUS_ERR_PARAM;
1482 
1483 		q_vector->num_ring_rx++;
1484 		q_vector->rx.itr_idx = map->rxitr_idx;
1485 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
1486 		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
1487 				      q_vector->rx.itr_idx);
1488 	}
1489 
1490 	qmap = map->txq_map;
1491 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
1492 		vsi_q_id = vsi_q_id_idx;
1493 
1494 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
1495 			return VIRTCHNL_STATUS_ERR_PARAM;
1496 
1497 		q_vector->num_ring_tx++;
1498 		q_vector->tx.itr_idx = map->txitr_idx;
1499 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
1500 		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
1501 				      q_vector->tx.itr_idx);
1502 	}
1503 
1504 	return VIRTCHNL_STATUS_SUCCESS;
1505 }
1506 
1507 /**
1508  * ice_vc_cfg_irq_map_msg
1509  * @vf: pointer to the VF info
1510  * @msg: pointer to the msg buffer
1511  *
1512  * called from the VF to configure the IRQ to queue map
1513  */
1514 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
1515 {
1516 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1517 	u16 num_q_vectors_mapped, vsi_id, vector_id;
1518 	struct virtchnl_irq_map_info *irqmap_info;
1519 	struct virtchnl_vector_map *map;
1520 	struct ice_pf *pf = vf->pf;
1521 	struct ice_vsi *vsi;
1522 	int i;
1523 
1524 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
1525 	num_q_vectors_mapped = irqmap_info->num_vectors;
1526 
1527 	/* Check to make sure number of VF vectors mapped is not greater than
1528 	 * number of VF vectors originally allocated, and check that
1529 	 * there is actually at least a single VF queue vector mapped
1530 	 */
1531 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1532 	    pf->vfs.num_msix_per < num_q_vectors_mapped ||
1533 	    !num_q_vectors_mapped) {
1534 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1535 		goto error_param;
1536 	}
1537 
1538 	vsi = ice_get_vf_vsi(vf);
1539 	if (!vsi) {
1540 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1541 		goto error_param;
1542 	}
1543 
1544 	for (i = 0; i < num_q_vectors_mapped; i++) {
1545 		struct ice_q_vector *q_vector;
1546 
1547 		map = &irqmap_info->vecmap[i];
1548 
1549 		vector_id = map->vector_id;
1550 		vsi_id = map->vsi_id;
1551 		/* vector_id is always 0-based for each VF, and can never be
1552 		 * larger than or equal to the max allowed interrupts per VF
1553 		 */
1554 		if (!(vector_id < pf->vfs.num_msix_per) ||
1555 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
1556 		    (!vector_id && (map->rxq_map || map->txq_map))) {
1557 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1558 			goto error_param;
1559 		}
1560 
1561 		/* No need to map VF miscellaneous or rogue vector */
1562 		if (!vector_id)
1563 			continue;
1564 
		/* Subtract the non-queue vector from vector_id passed by the
		 * VF to get the actual VSI queue vector array index
1567 		 */
1568 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
1569 		if (!q_vector) {
1570 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1571 			goto error_param;
1572 		}
1573 
		/* look out for an invalid queue index */
1575 		v_ret = (enum virtchnl_status_code)
1576 			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
1577 		if (v_ret)
1578 			goto error_param;
1579 	}
1580 
1581 error_param:
1582 	/* send the response to the VF */
1583 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1584 				     NULL, 0);
1585 }
1586 
1587 /**
1588  * ice_vc_cfg_qs_msg
1589  * @vf: pointer to the VF info
1590  * @msg: pointer to the msg buffer
1591  *
1592  * called from the VF to configure the Rx/Tx queues
1593  */
1594 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
1595 {
1596 	struct virtchnl_vsi_queue_config_info *qci =
1597 	    (struct virtchnl_vsi_queue_config_info *)msg;
1598 	struct virtchnl_queue_pair_info *qpi;
1599 	struct ice_pf *pf = vf->pf;
1600 	struct ice_vsi *vsi;
1601 	int i = -1, q_idx;
1602 
1603 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1604 		goto error_param;
1605 
1606 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
1607 		goto error_param;
1608 
1609 	vsi = ice_get_vf_vsi(vf);
1610 	if (!vsi)
1611 		goto error_param;
1612 
1613 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
1614 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
1615 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
1616 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
1617 		goto error_param;
1618 	}
1619 
1620 	for (i = 0; i < qci->num_queue_pairs; i++) {
1621 		qpi = &qci->qpair[i];
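		/* each queue pair must target this VSI, pair Tx and Rx queues
		 * of the same index, use valid ring lengths, and must not
		 * request head writeback
		 */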
1622 		if (qpi->txq.vsi_id != qci->vsi_id ||
1623 		    qpi->rxq.vsi_id != qci->vsi_id ||
1624 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
1625 		    qpi->txq.headwb_enabled ||
1626 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
1627 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1628 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
1629 			goto error_param;
1630 		}
1631 
1632 		q_idx = qpi->rxq.queue_id;
1633 
1634 		/* make sure selected "q_idx" is in valid range of queues
1635 		 * for selected "vsi"
1636 		 */
1637 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
1638 			goto error_param;
1639 		}
1640 
1641 		/* copy Tx queue info from VF into VSI */
1642 		if (qpi->txq.ring_len > 0) {
1643 			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
1644 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
1645 
1646 			/* Disable any existing queue first */
1647 			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
1648 				goto error_param;
1649 
1650 			/* Configure a queue with the requested settings */
1651 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
1652 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
1653 					 vf->vf_id, i);
1654 				goto error_param;
1655 			}
1656 		}
1657 
1658 		/* copy Rx queue info from VF into VSI */
1659 		if (qpi->rxq.ring_len > 0) {
1660 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
1661 
1662 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
1663 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
1664 
1665 			if (qpi->rxq.databuffer_size != 0 &&
1666 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
1667 			     qpi->rxq.databuffer_size < 1024))
1668 				goto error_param;
1669 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
1670 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
1671 			if (qpi->rxq.max_pkt_size > max_frame_size ||
1672 			    qpi->rxq.max_pkt_size < 64)
1673 				goto error_param;
1674 
1675 			vsi->max_frame = qpi->rxq.max_pkt_size;
1676 			/* add space for the port VLAN since the VF driver is
1677 			 * not expected to account for it in the MTU
1678 			 * calculation
1679 			 */
1680 			if (ice_vf_is_port_vlan_ena(vf))
1681 				vsi->max_frame += VLAN_HLEN;
1682 
1683 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
1684 				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
1685 					 vf->vf_id, i);
1686 				goto error_param;
1687 			}
1688 		}
1689 	}
1690 
1691 	/* send the response to the VF */
1692 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1693 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
1694 error_param:
1695 	/* disable whatever we can */
1696 	for (; i >= 0; i--) {
1697 		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
1698 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
1699 				vf->vf_id, i);
1700 		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
1701 			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
1702 				vf->vf_id, i);
1703 	}
1704 
1705 	/* send the response to the VF */
1706 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1707 				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
1708 }
1709 
1710 /**
1711  * ice_can_vf_change_mac
1712  * @vf: pointer to the VF info
1713  *
1714  * Return true if the VF is allowed to change its MAC filters, false otherwise
1715  */
1716 static bool ice_can_vf_change_mac(struct ice_vf *vf)
1717 {
1718 	/* If the VF MAC address has been set administratively (via the
1719 	 * ndo_set_vf_mac command), then deny permission to the VF to
1720 	 * add/delete unicast MAC addresses, unless the VF is trusted
1721 	 */
1722 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
1723 		return false;
1724 
1725 	return true;
1726 }
1727 
1728 /**
1729  * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
1730  * @vc_ether_addr: used to extract the type
1731  */
1732 static u8
1733 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
1734 {
1735 	return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
1736 }
1737 
1738 /**
1739  * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
1740  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1741  */
1742 static bool
1743 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
1744 {
1745 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1746 
1747 	return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
1748 }
1749 
1750 /**
1751  * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
1752  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1753  *
1754  * This function should only be called when the MAC address in
1755  * virtchnl_ether_addr is a valid unicast MAC
1756  */
1757 static bool
1758 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
1759 {
1760 	u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1761 
1762 	return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
1763 }
1764 
1765 /**
1766  * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
1767  * @vf: VF to update
1768  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
1769  */
1770 static void
1771 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1772 {
1773 	u8 *mac_addr = vc_ether_addr->addr;
1774 
1775 	if (!is_valid_ether_addr(mac_addr))
1776 		return;
1777 
1778 	/* only allow legacy VF drivers to set the device and hardware MAC if it
1779 	 * is zero and allow new VF drivers to set the hardware MAC if the type
1780 	 * was correctly specified over VIRTCHNL
1781 	 */
1782 	if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
1783 	     is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
1784 	    ice_is_vc_addr_primary(vc_ether_addr)) {
1785 		ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
1786 		ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
1787 	}
1788 
	/* hardware and device MACs are already set, but it's possible that the
1790 	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
1791 	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
1792 	 * away for the legacy VF driver case as it will be updated in the
1793 	 * delete flow for this case
1794 	 */
1795 	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
1796 		ether_addr_copy(vf->legacy_last_added_umac.addr,
1797 				mac_addr);
1798 		vf->legacy_last_added_umac.time_modified = jiffies;
1799 	}
1800 }
1801 
1802 /**
1803  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
1804  * @vf: pointer to the VF info
1805  * @vsi: pointer to the VF's VSI
1806  * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
1807  */
1808 static int
1809 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1810 		    struct virtchnl_ether_addr *vc_ether_addr)
1811 {
1812 	struct device *dev = ice_pf_to_dev(vf->pf);
1813 	u8 *mac_addr = vc_ether_addr->addr;
1814 	int ret;
1815 
1816 	/* device MAC already added */
1817 	if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
1818 		return 0;
1819 
1820 	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
1821 		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
1822 		return -EPERM;
1823 	}
1824 
1825 	ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1826 	if (ret == -EEXIST) {
1827 		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
1828 			vf->vf_id);
1829 		/* don't return since we might need to update
1830 		 * the primary MAC in ice_vfhw_mac_add() below
1831 		 */
1832 	} else if (ret) {
1833 		dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
1834 			mac_addr, vf->vf_id, ret);
1835 		return ret;
1836 	} else {
1837 		vf->num_mac++;
1838 	}
1839 
1840 	ice_vfhw_mac_add(vf, vc_ether_addr);
1841 
1842 	return ret;
1843 }
1844 
1845 /**
1846  * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
1847  * @last_added_umac: structure used to check expiration
1848  */
1849 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
1850 {
1851 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME	msecs_to_jiffies(3000)
1852 	return time_is_before_jiffies(last_added_umac->time_modified +
1853 				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
1854 }
1855 
1856 /**
1857  * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
1858  * @vf: VF to update
1859  * @vc_ether_addr: structure from VIRTCHNL with MAC to check
1860  *
 * only update the cached hardware MAC for legacy VF drivers on delete,
 * because we cannot guarantee the order/type of MAC from the VF driver
1863  */
1864 static void
1865 ice_update_legacy_cached_mac(struct ice_vf *vf,
1866 			     struct virtchnl_ether_addr *vc_ether_addr)
1867 {
1868 	if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
1869 	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
1870 		return;
1871 
1872 	ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
1873 	ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
1874 }
1875 
1876 /**
1877  * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
1878  * @vf: VF to update
1879  * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
1880  */
1881 static void
1882 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1883 {
1884 	u8 *mac_addr = vc_ether_addr->addr;
1885 
1886 	if (!is_valid_ether_addr(mac_addr) ||
1887 	    !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
1888 		return;
1889 
1890 	/* allow the device MAC to be repopulated in the add flow and don't
1891 	 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
1892 	 * to be persistent on VM reboot and across driver unload/load, which
1893 	 * won't work if we clear the hardware MAC here
1894 	 */
1895 	eth_zero_addr(vf->dev_lan_addr.addr);
1896 
1897 	ice_update_legacy_cached_mac(vf, vc_ether_addr);
1898 }
1899 
1900 /**
1901  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
1902  * @vf: pointer to the VF info
1903  * @vsi: pointer to the VF's VSI
1904  * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
1905  */
1906 static int
1907 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1908 		    struct virtchnl_ether_addr *vc_ether_addr)
1909 {
1910 	struct device *dev = ice_pf_to_dev(vf->pf);
1911 	u8 *mac_addr = vc_ether_addr->addr;
1912 	int status;
1913 
1914 	if (!ice_can_vf_change_mac(vf) &&
1915 	    ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
1916 		return 0;
1917 
1918 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1919 	if (status == -ENOENT) {
1920 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
1921 			vf->vf_id);
1922 		return -ENOENT;
1923 	} else if (status) {
1924 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
1925 			mac_addr, vf->vf_id, status);
1926 		return -EIO;
1927 	}
1928 
1929 	ice_vfhw_mac_del(vf, vc_ether_addr);
1930 
1931 	vf->num_mac--;
1932 
1933 	return 0;
1934 }
1935 
1936 /**
1937  * ice_vc_handle_mac_addr_msg
1938  * @vf: pointer to the VF info
1939  * @msg: pointer to the msg buffer
1940  * @set: true if MAC filters are being set, false otherwise
1941  *
 * add or remove guest MAC address filters
1943  */
1944 static int
1945 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
1946 {
1947 	int (*ice_vc_cfg_mac)
1948 		(struct ice_vf *vf, struct ice_vsi *vsi,
1949 		 struct virtchnl_ether_addr *virtchnl_ether_addr);
1950 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1951 	struct virtchnl_ether_addr_list *al =
1952 	    (struct virtchnl_ether_addr_list *)msg;
1953 	struct ice_pf *pf = vf->pf;
1954 	enum virtchnl_ops vc_op;
1955 	struct ice_vsi *vsi;
1956 	int i;
1957 
1958 	if (set) {
1959 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
1960 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
1961 	} else {
1962 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
1963 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
1964 	}
1965 
1966 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1967 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1968 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1969 		goto handle_mac_exit;
1970 	}
1971 
1972 	/* If this VF is not privileged, then we can't add more than a
1973 	 * limited number of addresses. Check to make sure that the
1974 	 * additions do not push us over the limit.
1975 	 */
1976 	if (set && !ice_is_vf_trusted(vf) &&
1977 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
1978 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
1979 			vf->vf_id);
1980 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1981 		goto handle_mac_exit;
1982 	}
1983 
1984 	vsi = ice_get_vf_vsi(vf);
1985 	if (!vsi) {
1986 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1987 		goto handle_mac_exit;
1988 	}
1989 
1990 	for (i = 0; i < al->num_elements; i++) {
1991 		u8 *mac_addr = al->list[i].addr;
1992 		int result;
1993 
1994 		if (is_broadcast_ether_addr(mac_addr) ||
1995 		    is_zero_ether_addr(mac_addr))
1996 			continue;
1997 
1998 		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
1999 		if (result == -EEXIST || result == -ENOENT) {
2000 			continue;
2001 		} else if (result) {
2002 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2003 			goto handle_mac_exit;
2004 		}
2005 	}
2006 
2007 handle_mac_exit:
2008 	/* send the response to the VF */
2009 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2010 }
2011 
2012 /**
2013  * ice_vc_add_mac_addr_msg
2014  * @vf: pointer to the VF info
2015  * @msg: pointer to the msg buffer
2016  *
2017  * add guest MAC address filter
2018  */
2019 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2020 {
2021 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
2022 }
2023 
2024 /**
2025  * ice_vc_del_mac_addr_msg
2026  * @vf: pointer to the VF info
2027  * @msg: pointer to the msg buffer
2028  *
2029  * remove guest MAC address filter
2030  */
2031 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2032 {
2033 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
2034 }
2035 
2036 /**
2037  * ice_vc_request_qs_msg
2038  * @vf: pointer to the VF info
2039  * @msg: pointer to the msg buffer
2040  *
2041  * VFs get a default number of queues but can use this message to request a
2042  * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send a message informing the VF of the
 * number of available queue pairs via the virtchnl response.
2045  */
2046 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2047 {
2048 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2049 	struct virtchnl_vf_res_request *vfres =
2050 		(struct virtchnl_vf_res_request *)msg;
2051 	u16 req_queues = vfres->num_queue_pairs;
2052 	struct ice_pf *pf = vf->pf;
2053 	u16 max_allowed_vf_queues;
2054 	u16 tx_rx_queue_left;
2055 	struct device *dev;
2056 	u16 cur_queues;
2057 
2058 	dev = ice_pf_to_dev(pf);
2059 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2060 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2061 		goto error_param;
2062 	}
2063 
2064 	cur_queues = vf->num_vf_qs;
2065 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2066 				 ice_get_avail_rxq_count(pf));
2067 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2068 	if (!req_queues) {
2069 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
2070 			vf->vf_id);
2071 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
2072 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
2073 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
2074 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
2075 	} else if (req_queues > cur_queues &&
2076 		   req_queues - cur_queues > tx_rx_queue_left) {
2077 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
2078 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2079 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2080 					       ICE_MAX_RSS_QS_PER_VF);
2081 	} else {
2082 		/* request is successful, then reset VF */
2083 		vf->num_req_qs = req_queues;
2084 		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
2085 		dev_info(dev, "VF %d granted request of %u queues.\n",
2086 			 vf->vf_id, req_queues);
2087 		return 0;
2088 	}
2089 
2090 error_param:
2091 	/* send the response to the VF */
2092 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2093 				     v_ret, (u8 *)vfres, sizeof(*vfres));
2094 }
2095 
2096 /**
2097  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2098  * @caps: VF driver negotiated capabilities
2099  *
2100  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
2101  */
2102 static bool ice_vf_vlan_offload_ena(u32 caps)
2103 {
2104 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
2105 }
2106 
2107 /**
2108  * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
2109  * @vf: VF used to determine if VLAN promiscuous config is allowed
2110  */
2111 static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
2112 {
2113 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2114 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
2115 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
2116 		return true;
2117 
2118 	return false;
2119 }
2120 
2121 /**
2122  * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
2123  * @vsi: VF's VSI used to enable VLAN promiscuous mode
2124  * @vlan: VLAN used to enable VLAN promiscuous
2125  *
2126  * This function should only be called if VLAN promiscuous mode is allowed,
2127  * which can be determined via ice_is_vlan_promisc_allowed().
2128  */
2129 static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2130 {
2131 	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2132 	int status;
2133 
2134 	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2135 					  vlan->vid);
2136 	if (status && status != -EEXIST)
2137 		return status;
2138 
2139 	return 0;
2140 }
2141 
2142 /**
2143  * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
 * @vsi: VF's VSI used to disable VLAN promiscuous mode
2145  * @vlan: VLAN used to disable VLAN promiscuous
2146  *
2147  * This function should only be called if VLAN promiscuous mode is allowed,
2148  * which can be determined via ice_is_vlan_promisc_allowed().
2149  */
2150 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2151 {
2152 	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2153 	int status;
2154 
2155 	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2156 					    vlan->vid);
2157 	if (status && status != -ENOENT)
2158 		return status;
2159 
2160 	return 0;
2161 }
2162 
2163 /**
2164  * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
2165  * @vf: VF to check against
2166  * @vsi: VF's VSI
2167  *
2168  * If the VF is trusted then the VF is allowed to add as many VLANs as it
2169  * wants to, so return false.
2170  *
2171  * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
2172  * allowed VLANs for an untrusted VF. Return the result of this comparison.
2173  */
2174 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
2175 {
2176 	if (ice_is_vf_trusted(vf))
2177 		return false;
2178 
2179 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS	1
2180 	return ((ice_vsi_num_non_zero_vlans(vsi) +
2181 		ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
2182 }
2183 
2184 /**
2185  * ice_vc_process_vlan_msg
2186  * @vf: pointer to the VF info
2187  * @msg: pointer to the msg buffer
2188  * @add_v: Add VLAN if true, otherwise delete VLAN
2189  *
2190  * Process virtchnl op to add or remove programmed guest VLAN ID
2191  */
2192 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2193 {
2194 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2195 	struct virtchnl_vlan_filter_list *vfl =
2196 	    (struct virtchnl_vlan_filter_list *)msg;
2197 	struct ice_pf *pf = vf->pf;
2198 	bool vlan_promisc = false;
2199 	struct ice_vsi *vsi;
2200 	struct device *dev;
2201 	int status = 0;
2202 	int i;
2203 
2204 	dev = ice_pf_to_dev(pf);
2205 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2206 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2207 		goto error_param;
2208 	}
2209 
2210 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2211 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2212 		goto error_param;
2213 	}
2214 
2215 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2216 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2217 		goto error_param;
2218 	}
2219 
2220 	for (i = 0; i < vfl->num_elements; i++) {
2221 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
2222 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2223 			dev_err(dev, "invalid VF VLAN id %d\n",
2224 				vfl->vlan_id[i]);
2225 			goto error_param;
2226 		}
2227 	}
2228 
2229 	vsi = ice_get_vf_vsi(vf);
2230 	if (!vsi) {
2231 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2232 		goto error_param;
2233 	}
2234 
2235 	if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
2236 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2237 			 vf->vf_id);
		/* There is no need to let the VF know that it is not trusted,
		 * so we can just return a success message here
		 */
2241 		goto error_param;
2242 	}
2243 
2244 	/* in DVM a VF can add/delete inner VLAN filters when
2245 	 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
2246 	 */
2247 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
2248 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2249 		goto error_param;
2250 	}
2251 
2252 	/* in DVM VLAN promiscuous is based on the outer VLAN, which would be
2253 	 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
2254 	 * allow vlan_promisc = true in SVM and if no port VLAN is configured
2255 	 */
2256 	vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
2257 		!ice_is_dvm_ena(&pf->hw) &&
2258 		!ice_vf_is_port_vlan_ena(vf);
2259 
2260 	if (add_v) {
2261 		for (i = 0; i < vfl->num_elements; i++) {
2262 			u16 vid = vfl->vlan_id[i];
2263 			struct ice_vlan vlan;
2264 
2265 			if (ice_vf_has_max_vlans(vf, vsi)) {
2266 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2267 					 vf->vf_id);
				/* There is no need to let the VF know that it
				 * is not trusted, so we can just return a
				 * success message here as well.
				 */
2272 				goto error_param;
2273 			}
2274 
2275 			/* we add VLAN 0 by default for each VF so we can enable
2276 			 * Tx VLAN anti-spoof without triggering MDD events so
2277 			 * we don't need to add it again here
2278 			 */
2279 			if (!vid)
2280 				continue;
2281 
2282 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2283 			status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
2284 			if (status) {
2285 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2286 				goto error_param;
2287 			}
2288 
2289 			/* Enable VLAN filtering on first non-zero VLAN */
2290 			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
2291 				if (vf->spoofchk) {
2292 					status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
2293 					if (status) {
2294 						v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2295 						dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
2296 							vid, status);
2297 						goto error_param;
2298 					}
2299 				}
				status = vsi->inner_vlan_ops.ena_rx_filtering(vsi);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
2306 			} else if (vlan_promisc) {
2307 				status = ice_vf_ena_vlan_promisc(vsi, &vlan);
2308 				if (status) {
2309 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2310 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
2311 						vid, status);
2312 				}
2313 			}
2314 		}
2315 	} else {
		/* In case of a non-trusted VF, the number of VLAN elements
		 * passed to the PF for removal might be greater than the
		 * number of VLAN filters programmed for that VF, so use the
		 * actual number of VLANs added earlier with the add VLAN
		 * opcode. This avoids removing a VLAN that doesn't exist,
		 * which would result in sending an erroneous failure message
		 * back to the VF.
		 */
2323 		int num_vf_vlan;
2324 
2325 		num_vf_vlan = vsi->num_vlan;
2326 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2327 			u16 vid = vfl->vlan_id[i];
2328 			struct ice_vlan vlan;
2329 
2330 			/* we add VLAN 0 by default for each VF so we can enable
2331 			 * Tx VLAN anti-spoof without triggering MDD events so
2332 			 * we don't want a VIRTCHNL request to remove it
2333 			 */
2334 			if (!vid)
2335 				continue;
2336 
2337 			vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2338 			status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
2339 			if (status) {
2340 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2341 				goto error_param;
2342 			}
2343 
2344 			/* Disable VLAN filtering when only VLAN 0 is left */
2345 			if (!ice_vsi_has_non_zero_vlans(vsi)) {
2346 				vsi->inner_vlan_ops.dis_tx_filtering(vsi);
2347 				vsi->inner_vlan_ops.dis_rx_filtering(vsi);
2348 			}
2349 
2350 			if (vlan_promisc)
2351 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2352 		}
2353 	}
2354 
2355 error_param:
2356 	/* send the response to the VF */
2357 	if (add_v)
2358 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2359 					     NULL, 0);
2360 	else
2361 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2362 					     NULL, 0);
2363 }
2364 
2365 /**
2366  * ice_vc_add_vlan_msg
2367  * @vf: pointer to the VF info
2368  * @msg: pointer to the msg buffer
2369  *
2370  * Add and program guest VLAN ID
2371  */
2372 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2373 {
2374 	return ice_vc_process_vlan_msg(vf, msg, true);
2375 }
2376 
2377 /**
2378  * ice_vc_remove_vlan_msg
2379  * @vf: pointer to the VF info
2380  * @msg: pointer to the msg buffer
2381  *
2382  * remove programmed guest VLAN ID
2383  */
2384 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2385 {
2386 	return ice_vc_process_vlan_msg(vf, msg, false);
2387 }
2388 
2389 /**
2390  * ice_vc_ena_vlan_stripping
2391  * @vf: pointer to the VF info
2392  *
2393  * Enable VLAN header stripping for a given VF
2394  */
2395 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2396 {
2397 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2398 	struct ice_vsi *vsi;
2399 
2400 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2401 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2402 		goto error_param;
2403 	}
2404 
2405 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2406 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2407 		goto error_param;
2408 	}
2409 
2410 	vsi = ice_get_vf_vsi(vf);
2411 	if (!vsi) {
2412 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2413 		goto error_param;
2414 	}
2415 
2416 	if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
2417 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2418 
2419 error_param:
2420 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2421 				     v_ret, NULL, 0);
2422 }
2423 
2424 /**
2425  * ice_vc_dis_vlan_stripping
2426  * @vf: pointer to the VF info
2427  *
2428  * Disable VLAN header stripping for a given VF
2429  */
2430 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2431 {
2432 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2433 	struct ice_vsi *vsi;
2434 
2435 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2436 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2437 		goto error_param;
2438 	}
2439 
2440 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2441 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2442 		goto error_param;
2443 	}
2444 
2445 	vsi = ice_get_vf_vsi(vf);
2446 	if (!vsi) {
2447 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2448 		goto error_param;
2449 	}
2450 
2451 	if (vsi->inner_vlan_ops.dis_stripping(vsi))
2452 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2453 
2454 error_param:
2455 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2456 				     v_ret, NULL, 0);
2457 }
2458 
2459 /**
2460  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
2461  * @vf: VF to enable/disable VLAN stripping for on initialization
2462  *
2463  * Set the default for VLAN stripping based on whether a port VLAN is configured
2464  * and the current VLAN mode of the device.
2465  */
2466 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
2467 {
2468 	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
2469 
2470 	if (!vsi)
2471 		return -EINVAL;
2472 
2473 	/* don't modify stripping if port VLAN is configured in SVM since the
2474 	 * port VLAN is based on the inner/single VLAN in SVM
2475 	 */
2476 	if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
2477 		return 0;
2478 
2479 	if (ice_vf_vlan_offload_ena(vf->driver_caps))
2480 		return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
2481 	else
2482 		return vsi->inner_vlan_ops.dis_stripping(vsi);
2483 }
2484 
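/**
 * ice_vc_get_max_vlan_fltrs - get the maximum allowed VLAN filters for a VF
 * @vf: VF to get the VLAN filter limit for
 *
 * Trusted VFs can use any valid VLAN ID, so return VLAN_N_VID for them.
 * Untrusted VFs are limited to ICE_MAX_VLAN_PER_VF filters.
 */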
2485 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
2486 {
2487 	if (vf->trusted)
2488 		return VLAN_N_VID;
2489 	else
2490 		return ICE_MAX_VLAN_PER_VF;
2491 }
2492 
2493 /**
2494  * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
 * @vf: VF being checked
2496  *
2497  * When the device is in double VLAN mode, check whether or not the outer VLAN
2498  * is allowed.
2499  */
2500 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
2501 {
2502 	if (ice_vf_is_port_vlan_ena(vf))
2503 		return true;
2504 
2505 	return false;
2506 }
2507 
2508 /**
2509  * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
2510  * @vf: VF that capabilities are being set for
2511  * @caps: VLAN capabilities to populate
2512  *
2513  * Determine VLAN capabilities support based on whether a port VLAN is
2514  * configured. If a port VLAN is configured then the VF should use the inner
2515  * filtering/offload capabilities since the port VLAN is using the outer VLAN
 * capabilities.
2517  */
2518 static void
2519 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2520 {
2521 	struct virtchnl_vlan_supported_caps *supported_caps;
2522 
2523 	if (ice_vf_outer_vlan_not_allowed(vf)) {
2524 		/* until support for inner VLAN filtering is added when a port
2525 		 * VLAN is configured, only support software offloaded inner
		 * VLANs when a port VLAN is configured in DVM
2527 		 */
2528 		supported_caps = &caps->filtering.filtering_support;
2529 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2530 
2531 		supported_caps = &caps->offloads.stripping_support;
2532 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2533 					VIRTCHNL_VLAN_TOGGLE |
2534 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2535 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2536 
2537 		supported_caps = &caps->offloads.insertion_support;
2538 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2539 					VIRTCHNL_VLAN_TOGGLE |
2540 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2541 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2542 
2543 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2544 		caps->offloads.ethertype_match =
2545 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2546 	} else {
2547 		supported_caps = &caps->filtering.filtering_support;
2548 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2549 		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2550 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2551 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2552 					VIRTCHNL_VLAN_ETHERTYPE_AND;
2553 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2554 						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2555 						 VIRTCHNL_VLAN_ETHERTYPE_9100;
2556 
2557 		supported_caps = &caps->offloads.stripping_support;
2558 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2559 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2560 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2561 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2562 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2563 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2564 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2565 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2566 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
2567 
2568 		supported_caps = &caps->offloads.insertion_support;
2569 		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2570 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2571 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2572 		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2573 					VIRTCHNL_VLAN_ETHERTYPE_8100 |
2574 					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2575 					VIRTCHNL_VLAN_ETHERTYPE_9100 |
2576 					VIRTCHNL_VLAN_ETHERTYPE_XOR |
2577 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
2578 
2579 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2580 
2581 		caps->offloads.ethertype_match =
2582 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2583 	}
2584 
2585 	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2586 }
2587 
2588 /**
2589  * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
2590  * @vf: VF that capabilities are being set for
2591  * @caps: VLAN capabilities to populate
2592  *
2593  * Determine VLAN capabilities support based on whether a port VLAN is
2594  * configured. If a port VLAN is configured then the VF does not have any VLAN
2595  * filtering or offload capabilities since the port VLAN is using the inner VLAN
2596  * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
 * VLAN filtering and offload capabilities.
2598  */
2599 static void
2600 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2601 {
2602 	struct virtchnl_vlan_supported_caps *supported_caps;
2603 
2604 	if (ice_vf_is_port_vlan_ena(vf)) {
2605 		supported_caps = &caps->filtering.filtering_support;
2606 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2607 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2608 
2609 		supported_caps = &caps->offloads.stripping_support;
2610 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2611 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2612 
2613 		supported_caps = &caps->offloads.insertion_support;
2614 		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2615 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2616 
2617 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
2618 		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
2619 		caps->filtering.max_filters = 0;
2620 	} else {
2621 		supported_caps = &caps->filtering.filtering_support;
2622 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
2623 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2624 		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2625 
2626 		supported_caps = &caps->offloads.stripping_support;
2627 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2628 					VIRTCHNL_VLAN_TOGGLE |
2629 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2630 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2631 
2632 		supported_caps = &caps->offloads.insertion_support;
2633 		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2634 					VIRTCHNL_VLAN_TOGGLE |
2635 					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2636 		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2637 
2638 		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2639 		caps->offloads.ethertype_match =
2640 			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2641 		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2642 	}
2643 }
2644 
2645 /**
2646  * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
2647  * @vf: VF to determine VLAN capabilities for
2648  *
2649  * This will only be called if the VF and PF successfully negotiated
2650  * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2651  *
2652  * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
2653  * is configured or not.
2654  */
2655 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
2656 {
2657 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2658 	struct virtchnl_vlan_caps *caps = NULL;
2659 	int err, len = 0;
2660 
2661 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2662 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2663 		goto out;
2664 	}
2665 
2666 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2667 	if (!caps) {
2668 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2669 		goto out;
2670 	}
2671 	len = sizeof(*caps);
2672 
2673 	if (ice_is_dvm_ena(&vf->pf->hw))
2674 		ice_vc_set_dvm_caps(vf, caps);
2675 	else
2676 		ice_vc_set_svm_caps(vf, caps);
2677 
2678 	/* store negotiated caps to prevent invalid VF messages */
2679 	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
2680 
2681 out:
2682 	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
2683 				    v_ret, (u8 *)caps, len);
2684 	kfree(caps);
2685 	return err;
2686 }
2687 
2688 /**
2689  * ice_vc_validate_vlan_tpid - validate VLAN TPID
2690  * @filtering_caps: negotiated/supported VLAN filtering capabilities
2691  * @tpid: VLAN TPID used for validation
2692  *
2693  * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
2694  * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
2695  */
2696 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
2697 {
2698 	enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
2699 
2700 	switch (tpid) {
2701 	case ETH_P_8021Q:
2702 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
2703 		break;
2704 	case ETH_P_8021AD:
2705 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
2706 		break;
2707 	case ETH_P_QINQ1:
2708 		vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
2709 		break;
2710 	}
2711 
2712 	if (!(filtering_caps & vlan_ethertype))
2713 		return false;
2714 
2715 	return true;
2716 }
2717 
2718 /**
2719  * ice_vc_is_valid_vlan - validate the virtchnl_vlan
2720  * @vc_vlan: virtchnl_vlan to validate
2721  *
2722  * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return
2723  * false. Otherwise return true.
2724  */
2725 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
2726 {
2727 	if (!vc_vlan->tci || !vc_vlan->tpid)
2728 		return false;
2729 
2730 	return true;
2731 }
2732 
2733 /**
2734  * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
2735  * @vfc: negotiated/supported VLAN filtering capabilities
2736  * @vfl: VLAN filter list from VF to validate
2737  *
2738  * Validate all of the filters in the VLAN filter list from the VF. If any of
2739  * the checks fail then return false. Otherwise return true.
2740  */
2741 static bool
2742 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
2743 				 struct virtchnl_vlan_filter_list_v2 *vfl)
2744 {
2745 	u16 i;
2746 
2747 	if (!vfl->num_elements)
2748 		return false;
2749 
2750 	for (i = 0; i < vfl->num_elements; i++) {
2751 		struct virtchnl_vlan_supported_caps *filtering_support =
2752 			&vfc->filtering_support;
2753 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2754 		struct virtchnl_vlan *outer = &vlan_fltr->outer;
2755 		struct virtchnl_vlan *inner = &vlan_fltr->inner;
2756 
2757 		if ((ice_vc_is_valid_vlan(outer) &&
2758 		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
2759 		    (ice_vc_is_valid_vlan(inner) &&
2760 		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
2761 			return false;
2762 
2763 		if ((outer->tci_mask &&
2764 		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
2765 		    (inner->tci_mask &&
2766 		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
2767 			return false;
2768 
2769 		if (((outer->tci & VLAN_PRIO_MASK) &&
2770 		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
2771 		    ((inner->tci & VLAN_PRIO_MASK) &&
2772 		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
2773 			return false;
2774 
2775 		if ((ice_vc_is_valid_vlan(outer) &&
2776 		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
2777 						outer->tpid)) ||
2778 		    (ice_vc_is_valid_vlan(inner) &&
2779 		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
2780 						inner->tpid)))
2781 			return false;
2782 	}
2783 
2784 	return true;
2785 }
2786 
2787 /**
2788  * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
2789  * @vc_vlan: struct virtchnl_vlan to transform
2790  */
2791 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
2792 {
2793 	struct ice_vlan vlan = { 0 };
2794 
2795 	vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2796 	vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
2797 	vlan.tpid = vc_vlan->tpid;
2798 
2799 	return vlan;
2800 }
2801 
2802 /**
 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
2804  * @vsi: VF's VSI used to perform the action
2805  * @vlan_action: function to perform the action with (i.e. add/del)
2806  * @vlan: VLAN filter to perform the action with
2807  */
2808 static int
2809 ice_vc_vlan_action(struct ice_vsi *vsi,
2810 		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
2811 		   struct ice_vlan *vlan)
2812 {
2813 	int err;
2814 
2815 	err = vlan_action(vsi, vlan);
2816 	if (err)
2817 		return err;
2818 
2819 	return 0;
2820 }
2821 
2822 /**
2823  * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
2824  * @vf: VF used to delete the VLAN(s)
2825  * @vsi: VF's VSI used to delete the VLAN(s)
 * @vfl: virtchnl filter list used to delete the filters
2827  */
2828 static int
2829 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2830 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2831 {
2832 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2833 	int err;
2834 	u16 i;
2835 
2836 	for (i = 0; i < vfl->num_elements; i++) {
2837 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2838 		struct virtchnl_vlan *vc_vlan;
2839 
2840 		vc_vlan = &vlan_fltr->outer;
2841 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2842 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2843 
2844 			err = ice_vc_vlan_action(vsi,
2845 						 vsi->outer_vlan_ops.del_vlan,
2846 						 &vlan);
2847 			if (err)
2848 				return err;
2849 
2850 			if (vlan_promisc)
2851 				ice_vf_dis_vlan_promisc(vsi, &vlan);
2852 
2853 			/* Disable VLAN filtering when only VLAN 0 is left */
2854 			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
2855 				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
2856 				if (err)
2857 					return err;
2858 			}
2859 		}
2860 
2861 		vc_vlan = &vlan_fltr->inner;
2862 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2863 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2864 
2865 			err = ice_vc_vlan_action(vsi,
2866 						 vsi->inner_vlan_ops.del_vlan,
2867 						 &vlan);
2868 			if (err)
2869 				return err;
2870 
2871 			/* no support for VLAN promiscuous on inner VLAN unless
2872 			 * we are in Single VLAN Mode (SVM)
2873 			 */
2874 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
2875 				if (vlan_promisc)
2876 					ice_vf_dis_vlan_promisc(vsi, &vlan);
2877 
2878 				/* Disable VLAN filtering when only VLAN 0 is left */
2879 				if (!ice_vsi_has_non_zero_vlans(vsi)) {
2880 					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
2881 					if (err)
2882 						return err;
2883 				}
2884 			}
2885 		}
2886 	}
2887 
2888 	return 0;
2889 }
2890 
2891 /**
2892  * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
2893  * @vf: VF the message was received from
2894  * @msg: message received from the VF
2895  */
2896 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2897 {
2898 	struct virtchnl_vlan_filter_list_v2 *vfl =
2899 		(struct virtchnl_vlan_filter_list_v2 *)msg;
2900 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2901 	struct ice_vsi *vsi;
2902 
2903 	if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
2904 					      vfl)) {
2905 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2906 		goto out;
2907 	}
2908 
2909 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2910 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2911 		goto out;
2912 	}
2913 
2914 	vsi = ice_get_vf_vsi(vf);
2915 	if (!vsi) {
2916 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2917 		goto out;
2918 	}
2919 
2920 	if (ice_vc_del_vlans(vf, vsi, vfl))
2921 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2922 
2923 out:
2924 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
2925 				     0);
2926 }
2927 
2928 /**
2929  * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
2930  * @vf: VF used to add the VLAN(s)
2931  * @vsi: VF's VSI used to add the VLAN(s)
 * @vfl: virtchnl filter list used to add the filters
2933  */
2934 static int
2935 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2936 		 struct virtchnl_vlan_filter_list_v2 *vfl)
2937 {
2938 	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2939 	int err;
2940 	u16 i;
2941 
2942 	for (i = 0; i < vfl->num_elements; i++) {
2943 		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2944 		struct virtchnl_vlan *vc_vlan;
2945 
2946 		vc_vlan = &vlan_fltr->outer;
2947 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2948 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2949 
2950 			err = ice_vc_vlan_action(vsi,
2951 						 vsi->outer_vlan_ops.add_vlan,
2952 						 &vlan);
2953 			if (err)
2954 				return err;
2955 
2956 			if (vlan_promisc) {
2957 				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2958 				if (err)
2959 					return err;
2960 			}
2961 
2962 			/* Enable VLAN filtering on first non-zero VLAN */
2963 			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
2964 				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
2965 				if (err)
2966 					return err;
2967 			}
2968 		}
2969 
2970 		vc_vlan = &vlan_fltr->inner;
2971 		if (ice_vc_is_valid_vlan(vc_vlan)) {
2972 			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2973 
2974 			err = ice_vc_vlan_action(vsi,
2975 						 vsi->inner_vlan_ops.add_vlan,
2976 						 &vlan);
2977 			if (err)
2978 				return err;
2979 
2980 			/* no support for VLAN promiscuous on inner VLAN unless
2981 			 * we are in Single VLAN Mode (SVM)
2982 			 */
2983 			if (!ice_is_dvm_ena(&vsi->back->hw)) {
2984 				if (vlan_promisc) {
2985 					err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2986 					if (err)
2987 						return err;
2988 				}
2989 
2990 				/* Enable VLAN filtering on first non-zero VLAN */
2991 				if (vf->spoofchk && vlan.vid) {
2992 					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
2993 					if (err)
2994 						return err;
2995 				}
2996 			}
2997 		}
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 /**
3004  * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
3005  * @vsi: VF VSI used to get number of existing VLAN filters
3006  * @vfc: negotiated/supported VLAN filtering capabilities
3007  * @vfl: VLAN filter list from VF to validate
3008  *
3009  * Validate all of the filters in the VLAN filter list from the VF during the
3010  * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
3011  * Otherwise return true.
3012  */
3013 static bool
3014 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
3015 				     struct virtchnl_vlan_filtering_caps *vfc,
3016 				     struct virtchnl_vlan_filter_list_v2 *vfl)
3017 {
3018 	u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
3019 		vfl->num_elements;
3020 
3021 	if (num_requested_filters > vfc->max_filters)
3022 		return false;
3023 
3024 	return ice_vc_validate_vlan_filter_list(vfc, vfl);
3025 }
3026 
3027 /**
3028  * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
3029  * @vf: VF the message was received from
3030  * @msg: message received from the VF
3031  */
3032 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
3033 {
3034 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3035 	struct virtchnl_vlan_filter_list_v2 *vfl =
3036 		(struct virtchnl_vlan_filter_list_v2 *)msg;
3037 	struct ice_vsi *vsi;
3038 
3039 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3040 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3041 		goto out;
3042 	}
3043 
3044 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
3045 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3046 		goto out;
3047 	}
3048 
3049 	vsi = ice_get_vf_vsi(vf);
3050 	if (!vsi) {
3051 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3052 		goto out;
3053 	}
3054 
3055 	if (!ice_vc_validate_add_vlan_filter_list(vsi,
3056 						  &vf->vlan_v2_caps.filtering,
3057 						  vfl)) {
3058 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3059 		goto out;
3060 	}
3061 
3062 	if (ice_vc_add_vlans(vf, vsi, vfl))
3063 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3064 
3065 out:
3066 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
3067 				     0);
3068 }
3069 
3070 /**
3071  * ice_vc_valid_vlan_setting - validate VLAN setting
3072  * @negotiated_settings: negotiated VLAN settings during VF init
3073  * @ethertype_setting: ethertype(s) requested for the VLAN setting
3074  */
3075 static bool
3076 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
3077 {
3078 	if (ethertype_setting && !(negotiated_settings & ethertype_setting))
3079 		return false;
3080 
3081 	/* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
	 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
3083 	 */
3084 	if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
3085 	    hweight32(ethertype_setting) > 1)
3086 		return false;
3087 
3088 	/* ability to modify the VLAN setting was not negotiated */
3089 	if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
3090 		return false;
3091 
3092 	return true;
3093 }
3094 
3095 /**
3096  * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
3097  * @caps: negotiated VLAN settings during VF init
3098  * @msg: message to validate
3099  *
3100  * Used to validate any VLAN virtchnl message sent as a
3101  * virtchnl_vlan_setting structure. Validates the message against the
3102  * negotiated/supported caps during VF driver init.
3103  */
3104 static bool
3105 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
3106 			      struct virtchnl_vlan_setting *msg)
3107 {
3108 	if ((!msg->outer_ethertype_setting &&
3109 	     !msg->inner_ethertype_setting) ||
3110 	    (!caps->outer && !caps->inner))
3111 		return false;
3112 
3113 	if (msg->outer_ethertype_setting &&
3114 	    !ice_vc_valid_vlan_setting(caps->outer,
3115 				       msg->outer_ethertype_setting))
3116 		return false;
3117 
3118 	if (msg->inner_ethertype_setting &&
3119 	    !ice_vc_valid_vlan_setting(caps->inner,
3120 				       msg->inner_ethertype_setting))
3121 		return false;
3122 
3123 	return true;
3124 }
3125 
3126 /**
3127  * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
3128  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
3129  * @tpid: VLAN TPID to populate
3130  */
3131 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
3132 {
3133 	switch (ethertype_setting) {
3134 	case VIRTCHNL_VLAN_ETHERTYPE_8100:
3135 		*tpid = ETH_P_8021Q;
3136 		break;
3137 	case VIRTCHNL_VLAN_ETHERTYPE_88A8:
3138 		*tpid = ETH_P_8021AD;
3139 		break;
3140 	case VIRTCHNL_VLAN_ETHERTYPE_9100:
3141 		*tpid = ETH_P_QINQ1;
3142 		break;
3143 	default:
3144 		*tpid = 0;
3145 		return -EINVAL;
3146 	}
3147 
3148 	return 0;
3149 }
3150 
3151 /**
3152  * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
3153  * @vsi: VF's VSI used to enable the VLAN offload
3154  * @ena_offload: function used to enable the VLAN offload
3155  * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
3156  */
3157 static int
3158 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
3159 			int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
3160 			u32 ethertype_setting)
3161 {
3162 	u16 tpid;
3163 	int err;
3164 
3165 	err = ice_vc_get_tpid(ethertype_setting, &tpid);
3166 	if (err)
3167 		return err;
3168 
3169 	err = ena_offload(vsi, tpid);
3170 	if (err)
3171 		return err;
3172 
3173 	return 0;
3174 }
3175 
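/* location of the l2tsel bit in the Rx queue context and the two supported
 * settings: report the first offloaded VLAN tag in L2TAG2_2ND or in L2TAG1
 * of the Rx descriptor
 */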
3176 #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX	3
3177 #define ICE_L2TSEL_BIT_OFFSET		23
3178 enum ice_l2tsel {
3179 	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
3180 	ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
3181 };
3182 
3183 /**
3184  * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
3185  * @vsi: VSI used to update l2tsel on
3186  * @l2tsel: l2tsel setting requested
3187  *
3188  * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
3189  * This will modify which descriptor field the first offloaded VLAN will be
3190  * stripped into.
3191  */
3192 static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
3193 {
3194 	struct ice_hw *hw = &vsi->back->hw;
3195 	u32 l2tsel_bit;
3196 	int i;
3197 
3198 	if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
3199 		l2tsel_bit = 0;
3200 	else
3201 		l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
3202 
3203 	for (i = 0; i < vsi->alloc_rxq; i++) {
3204 		u16 pfq = vsi->rxq_map[i];
3205 		u32 qrx_context_offset;
3206 		u32 regval;
3207 
3208 		qrx_context_offset =
3209 			QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
3210 
3211 		regval = rd32(hw, qrx_context_offset);
3212 		regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
3213 		regval |= l2tsel_bit;
3214 		wr32(hw, qrx_context_offset, regval);
3215 	}
3216 }
3217 
3218 /**
3219  * ice_vc_ena_vlan_stripping_v2_msg
3220  * @vf: VF the message was received from
3221  * @msg: message received from the VF
3222  *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
3224  */
3225 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3226 {
3227 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3228 	struct virtchnl_vlan_supported_caps *stripping_support;
3229 	struct virtchnl_vlan_setting *strip_msg =
3230 		(struct virtchnl_vlan_setting *)msg;
3231 	u32 ethertype_setting;
3232 	struct ice_vsi *vsi;
3233 
3234 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3235 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3236 		goto out;
3237 	}
3238 
3239 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3240 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3241 		goto out;
3242 	}
3243 
3244 	vsi = ice_get_vf_vsi(vf);
3245 	if (!vsi) {
3246 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3247 		goto out;
3248 	}
3249 
3250 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3251 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3252 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3253 		goto out;
3254 	}
3255 
3256 	ethertype_setting = strip_msg->outer_ethertype_setting;
3257 	if (ethertype_setting) {
3258 		if (ice_vc_ena_vlan_offload(vsi,
3259 					    vsi->outer_vlan_ops.ena_stripping,
3260 					    ethertype_setting)) {
3261 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3262 			goto out;
3263 		} else {
3264 			enum ice_l2tsel l2tsel =
3265 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
3266 
3267 			/* PF tells the VF that the outer VLAN tag is always
3268 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3269 			 * inner is always extracted to
3270 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3271 			 * support outer stripping so the first tag always ends
3272 			 * up in L2TAG2_2ND and the second/inner tag, if
3273 			 * enabled, is extracted in L2TAG1.
3274 			 */
3275 			ice_vsi_update_l2tsel(vsi, l2tsel);
3276 		}
3277 	}
3278 
3279 	ethertype_setting = strip_msg->inner_ethertype_setting;
3280 	if (ethertype_setting &&
3281 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
3282 				    ethertype_setting)) {
3283 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3284 		goto out;
3285 	}
3286 
3287 out:
3288 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
3289 				     v_ret, NULL, 0);
3290 }
3291 
3292 /**
3293  * ice_vc_dis_vlan_stripping_v2_msg
3294  * @vf: VF the message was received from
3295  * @msg: message received from the VF
3296  *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
3298  */
3299 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3300 {
3301 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3302 	struct virtchnl_vlan_supported_caps *stripping_support;
3303 	struct virtchnl_vlan_setting *strip_msg =
3304 		(struct virtchnl_vlan_setting *)msg;
3305 	u32 ethertype_setting;
3306 	struct ice_vsi *vsi;
3307 
3308 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3309 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3310 		goto out;
3311 	}
3312 
3313 	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3314 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3315 		goto out;
3316 	}
3317 
3318 	vsi = ice_get_vf_vsi(vf);
3319 	if (!vsi) {
3320 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3321 		goto out;
3322 	}
3323 
3324 	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3325 	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3326 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3327 		goto out;
3328 	}
3329 
3330 	ethertype_setting = strip_msg->outer_ethertype_setting;
3331 	if (ethertype_setting) {
3332 		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
3333 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3334 			goto out;
3335 		} else {
3336 			enum ice_l2tsel l2tsel =
3337 				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
3338 
3339 			/* PF tells the VF that the outer VLAN tag is always
3340 			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3341 			 * inner is always extracted to
3342 			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3343 			 * support inner stripping while outer stripping is
3344 			 * disabled so that the first and only tag is extracted
3345 			 * in L2TAG1.
3346 			 */
3347 			ice_vsi_update_l2tsel(vsi, l2tsel);
3348 		}
3349 	}
3350 
3351 	ethertype_setting = strip_msg->inner_ethertype_setting;
3352 	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
3353 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3354 		goto out;
3355 	}
3356 
3357 out:
3358 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
3359 				     v_ret, NULL, 0);
3360 }
3361 
3362 /**
3363  * ice_vc_ena_vlan_insertion_v2_msg
3364  * @vf: VF the message was received from
3365  * @msg: message received from the VF
3366  *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
3368  */
3369 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3370 {
3371 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3372 	struct virtchnl_vlan_supported_caps *insertion_support;
3373 	struct virtchnl_vlan_setting *insertion_msg =
3374 		(struct virtchnl_vlan_setting *)msg;
3375 	u32 ethertype_setting;
3376 	struct ice_vsi *vsi;
3377 
3378 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3379 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3380 		goto out;
3381 	}
3382 
3383 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3384 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3385 		goto out;
3386 	}
3387 
3388 	vsi = ice_get_vf_vsi(vf);
3389 	if (!vsi) {
3390 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3391 		goto out;
3392 	}
3393 
3394 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3395 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3396 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3397 		goto out;
3398 	}
3399 
3400 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3401 	if (ethertype_setting &&
3402 	    ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
3403 				    ethertype_setting)) {
3404 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3405 		goto out;
3406 	}
3407 
3408 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3409 	if (ethertype_setting &&
3410 	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
3411 				    ethertype_setting)) {
3412 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3413 		goto out;
3414 	}
3415 
3416 out:
3417 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
3418 				     v_ret, NULL, 0);
3419 }
3420 
3421 /**
3422  * ice_vc_dis_vlan_insertion_v2_msg
3423  * @vf: VF the message was received from
3424  * @msg: message received from the VF
3425  *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3427  */
3428 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3429 {
3430 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3431 	struct virtchnl_vlan_supported_caps *insertion_support;
3432 	struct virtchnl_vlan_setting *insertion_msg =
3433 		(struct virtchnl_vlan_setting *)msg;
3434 	u32 ethertype_setting;
3435 	struct ice_vsi *vsi;
3436 
3437 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3438 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3439 		goto out;
3440 	}
3441 
3442 	if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3443 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3444 		goto out;
3445 	}
3446 
3447 	vsi = ice_get_vf_vsi(vf);
3448 	if (!vsi) {
3449 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3450 		goto out;
3451 	}
3452 
3453 	insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3454 	if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3455 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3456 		goto out;
3457 	}
3458 
3459 	ethertype_setting = insertion_msg->outer_ethertype_setting;
3460 	if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
3461 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3462 		goto out;
3463 	}
3464 
3465 	ethertype_setting = insertion_msg->inner_ethertype_setting;
3466 	if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
3467 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3468 		goto out;
3469 	}
3470 
3471 out:
3472 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
3473 				     v_ret, NULL, 0);
3474 }
3475 
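/* Default virtchnl handler table. It is installed on a VF by
 * ice_virtchnl_set_dflt_ops() and the handlers are invoked from
 * ice_vc_process_vf_msg() based on the received opcode.
 */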
3476 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
3477 	.get_ver_msg = ice_vc_get_ver_msg,
3478 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3479 	.reset_vf = ice_vc_reset_vf_msg,
3480 	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
3481 	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
3482 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3483 	.ena_qs_msg = ice_vc_ena_qs_msg,
3484 	.dis_qs_msg = ice_vc_dis_qs_msg,
3485 	.request_qs_msg = ice_vc_request_qs_msg,
3486 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3487 	.config_rss_key = ice_vc_config_rss_key,
3488 	.config_rss_lut = ice_vc_config_rss_lut,
3489 	.get_stats_msg = ice_vc_get_stats_msg,
3490 	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
3491 	.add_vlan_msg = ice_vc_add_vlan_msg,
3492 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3493 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3494 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3495 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3496 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3497 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3498 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3499 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3500 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3501 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3502 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3503 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3504 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3505 };
3506 
3507 /**
3508  * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
3509  * @vf: the VF to switch ops
3510  */
3511 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
3512 {
3513 	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
3514 }
3515 
3516 /**
3517  * ice_vc_repr_add_mac
3518  * @vf: pointer to VF
3519  * @msg: virtchannel message
3520  *
 * When port representors are created, we do not add a MAC rule
 * to firmware; instead we store the address so that the PF can
 * report the same MAC as the VF.
3524  */
3525 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
3526 {
3527 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3528 	struct virtchnl_ether_addr_list *al =
3529 	    (struct virtchnl_ether_addr_list *)msg;
3530 	struct ice_vsi *vsi;
3531 	struct ice_pf *pf;
3532 	int i;
3533 
3534 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3535 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3536 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3537 		goto handle_mac_exit;
3538 	}
3539 
3540 	pf = vf->pf;
3541 
3542 	vsi = ice_get_vf_vsi(vf);
3543 	if (!vsi) {
3544 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3545 		goto handle_mac_exit;
3546 	}
3547 
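	/* Cache at most one additional unicast MAC for the representor:
	 * multicast/broadcast entries and the VF's current hardware address
	 * are skipped, and the loop stops after the first address is added.
	 */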
3548 	for (i = 0; i < al->num_elements; i++) {
3549 		u8 *mac_addr = al->list[i].addr;
3550 		int result;
3551 
3552 		if (!is_unicast_ether_addr(mac_addr) ||
3553 		    ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
3554 			continue;
3555 
3556 		if (vf->pf_set_mac) {
3557 			dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3558 			v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3559 			goto handle_mac_exit;
3560 		}
3561 
3562 		result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
3563 		if (result) {
			dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
3565 				mac_addr, vf->vf_id, result);
3566 			goto handle_mac_exit;
3567 		}
3568 
3569 		ice_vfhw_mac_add(vf, &al->list[i]);
3570 		vf->num_mac++;
3571 		break;
3572 	}
3573 
3574 handle_mac_exit:
3575 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3576 				     v_ret, NULL, 0);
3577 }
3578 
3579 /**
3580  * ice_vc_repr_del_mac - response with success for deleting MAC
3581  * @vf: pointer to VF
3582  * @msg: virtchannel message
3583  *
 * Respond with success so as not to break the normal VF flow.
 * For legacy VF drivers, try to update the cached MAC address.
3586  */
static int ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
3589 {
3590 	struct virtchnl_ether_addr_list *al =
3591 		(struct virtchnl_ether_addr_list *)msg;
3592 
3593 	ice_update_legacy_cached_mac(vf, &al->list[0]);
3594 
3595 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3596 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3597 }
3598 
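/**
 * ice_vc_repr_cfg_promiscuous_mode - reject promiscuous mode configuration
 * @vf: pointer to VF
 * @msg: virtchannel message (unused)
 *
 * Promiscuous mode cannot be configured by a VF while the PF is in
 * switchdev mode, so respond with VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
 */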
3599 static int
3600 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3601 {
3602 	dev_dbg(ice_pf_to_dev(vf->pf),
3603 		"Can't config promiscuous mode in switchdev mode for VF %d\n",
3604 		vf->vf_id);
3605 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3606 				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3607 				     NULL, 0);
3608 }
3609 
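/* Representor (switchdev) handler table: identical to the default set except
 * for MAC add/del and promiscuous mode configuration, which are handled
 * differently while the PF is in switchdev mode.
 */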
3610 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3611 	.get_ver_msg = ice_vc_get_ver_msg,
3612 	.get_vf_res_msg = ice_vc_get_vf_res_msg,
3613 	.reset_vf = ice_vc_reset_vf_msg,
3614 	.add_mac_addr_msg = ice_vc_repr_add_mac,
3615 	.del_mac_addr_msg = ice_vc_repr_del_mac,
3616 	.cfg_qs_msg = ice_vc_cfg_qs_msg,
3617 	.ena_qs_msg = ice_vc_ena_qs_msg,
3618 	.dis_qs_msg = ice_vc_dis_qs_msg,
3619 	.request_qs_msg = ice_vc_request_qs_msg,
3620 	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3621 	.config_rss_key = ice_vc_config_rss_key,
3622 	.config_rss_lut = ice_vc_config_rss_lut,
3623 	.get_stats_msg = ice_vc_get_stats_msg,
3624 	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3625 	.add_vlan_msg = ice_vc_add_vlan_msg,
3626 	.remove_vlan_msg = ice_vc_remove_vlan_msg,
3627 	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3628 	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3629 	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3630 	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3631 	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3632 	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3633 	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3634 	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3635 	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3636 	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3637 	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3638 	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3639 };
3640 
3641 /**
3642  * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3643  * @vf: the VF to switch ops
3644  */
3645 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3646 {
3647 	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3648 }
3649 
3650 /**
3651  * ice_vc_process_vf_msg - Process request from VF
3652  * @pf: pointer to the PF structure
3653  * @event: pointer to the AQ event
3654  *
 * Called from the common asq/arq handler to process a request from a VF.
3657  */
3658 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3659 {
3660 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3661 	s16 vf_id = le16_to_cpu(event->desc.retval);
3662 	const struct ice_virtchnl_ops *ops;
3663 	u16 msglen = event->msg_len;
3664 	u8 *msg = event->msg_buf;
3665 	struct ice_vf *vf = NULL;
3666 	struct device *dev;
3667 	int err = 0;
3668 
3669 	dev = ice_pf_to_dev(pf);
3670 
3671 	vf = ice_get_vf_by_id(pf, vf_id);
3672 	if (!vf) {
3673 		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
3674 			vf_id, v_opcode, msglen);
3675 		return;
3676 	}
3677 
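	/* Serialize handling of this message against concurrent VF
	 * configuration changes (e.g. VF reset).
	 */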
3678 	mutex_lock(&vf->cfg_lock);
3679 
3680 	/* Check if VF is disabled. */
3681 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3682 		err = -EPERM;
3683 		goto error_handler;
3684 	}
3685 
3686 	ops = vf->virtchnl_ops;
3687 
3688 	/* Perform basic checks on the msg */
3689 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3690 	if (err) {
3691 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3692 			err = -EPERM;
3693 		else
3694 			err = -EINVAL;
3695 	}
3696 
3697 error_handler:
3698 	if (err) {
3699 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3700 				      NULL, 0);
3701 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3702 			vf_id, v_opcode, msglen, err);
3703 		goto finish;
3704 	}
3705 
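	/* Reject opcodes that are not in the allowlist derived from the
	 * capabilities negotiated with this VF.
	 */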
3706 	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
3707 		ice_vc_send_msg_to_vf(vf, v_opcode,
3708 				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
3709 				      0);
3710 		goto finish;
3711 	}
3712 
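	/* Dispatch to the handler table installed for this VF (default or
	 * port representor ops).
	 */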
3713 	switch (v_opcode) {
3714 	case VIRTCHNL_OP_VERSION:
3715 		err = ops->get_ver_msg(vf, msg);
3716 		break;
3717 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3718 		err = ops->get_vf_res_msg(vf, msg);
3719 		if (ice_vf_init_vlan_stripping(vf))
3720 			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
3721 				vf->vf_id);
3722 		ice_vc_notify_vf_link_state(vf);
3723 		break;
3724 	case VIRTCHNL_OP_RESET_VF:
3725 		ops->reset_vf(vf);
3726 		break;
3727 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3728 		err = ops->add_mac_addr_msg(vf, msg);
3729 		break;
3730 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3731 		err = ops->del_mac_addr_msg(vf, msg);
3732 		break;
3733 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3734 		err = ops->cfg_qs_msg(vf, msg);
3735 		break;
3736 	case VIRTCHNL_OP_ENABLE_QUEUES:
3737 		err = ops->ena_qs_msg(vf, msg);
3738 		ice_vc_notify_vf_link_state(vf);
3739 		break;
3740 	case VIRTCHNL_OP_DISABLE_QUEUES:
3741 		err = ops->dis_qs_msg(vf, msg);
3742 		break;
3743 	case VIRTCHNL_OP_REQUEST_QUEUES:
3744 		err = ops->request_qs_msg(vf, msg);
3745 		break;
3746 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3747 		err = ops->cfg_irq_map_msg(vf, msg);
3748 		break;
3749 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3750 		err = ops->config_rss_key(vf, msg);
3751 		break;
3752 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3753 		err = ops->config_rss_lut(vf, msg);
3754 		break;
3755 	case VIRTCHNL_OP_GET_STATS:
3756 		err = ops->get_stats_msg(vf, msg);
3757 		break;
3758 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3759 		err = ops->cfg_promiscuous_mode_msg(vf, msg);
3760 		break;
3761 	case VIRTCHNL_OP_ADD_VLAN:
3762 		err = ops->add_vlan_msg(vf, msg);
3763 		break;
3764 	case VIRTCHNL_OP_DEL_VLAN:
3765 		err = ops->remove_vlan_msg(vf, msg);
3766 		break;
3767 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3768 		err = ops->ena_vlan_stripping(vf);
3769 		break;
3770 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3771 		err = ops->dis_vlan_stripping(vf);
3772 		break;
3773 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
3774 		err = ops->add_fdir_fltr_msg(vf, msg);
3775 		break;
3776 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
3777 		err = ops->del_fdir_fltr_msg(vf, msg);
3778 		break;
3779 	case VIRTCHNL_OP_ADD_RSS_CFG:
3780 		err = ops->handle_rss_cfg_msg(vf, msg, true);
3781 		break;
3782 	case VIRTCHNL_OP_DEL_RSS_CFG:
3783 		err = ops->handle_rss_cfg_msg(vf, msg, false);
3784 		break;
3785 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
3786 		err = ops->get_offload_vlan_v2_caps(vf);
3787 		break;
3788 	case VIRTCHNL_OP_ADD_VLAN_V2:
3789 		err = ops->add_vlan_v2_msg(vf, msg);
3790 		break;
3791 	case VIRTCHNL_OP_DEL_VLAN_V2:
3792 		err = ops->remove_vlan_v2_msg(vf, msg);
3793 		break;
3794 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
3795 		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
3796 		break;
3797 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
3798 		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
3799 		break;
3800 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
3801 		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
3802 		break;
3803 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
3804 		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
3805 		break;
3806 	case VIRTCHNL_OP_UNKNOWN:
3807 	default:
3808 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3809 			vf_id);
3810 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3811 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3812 					    NULL, 0);
3813 		break;
3814 	}
3815 	if (err) {
		/* Only log the failure; the common admin queue handler that
		 * called us is busy with pending work and does not act on
		 * per-message return values.
		 */
3819 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3820 			 vf_id, v_opcode, err);
3821 	}
3822 
3823 finish:
3824 	mutex_unlock(&vf->cfg_lock);
3825 	ice_put_vf(vf);
3826 }
3827