// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
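/* Worked example of the layout above: VSI index 5 with a combined
 * (flow + tun_offs) value of 7 yields
 * prof_id = (5ULL << 32) | 7 = 0x0000000500000007.
 */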

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
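/* Bit 0 of the input set flag records how an ESP SPI was matched: set
 * (FDIR_INSET_FLAG_ESP_UDP) for NAT-T ESP carried over UDP, clear
 * (FDIR_INSET_FLAG_ESP_IPSEC) for ESP directly over IP. The flag/mask
 * pairs in fdir_inset_map below use this to select the right flow field.
 */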

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

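/* A map entry only applies when (conf->inset_flag & mask) == flag; a mask
 * of 0 makes the entry unconditional. This is how the two
 * VIRTCHNL_PROTO_HDR_ESP_SPI entries below select between the native ESP
 * and NAT-T ESP flow fields.
 */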
static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check - check FDIR request parameters
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, PF state and VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi - allocate and start the VF's control VSI
 * @vf: pointer to the VF structure
 *
 * Allocate the ctrl_vsi on first use and open the ctrl_vsi port for the VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld - parse a virtchnl protocol header into flow fields
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld - set the flow's packet segment fields
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store the
 * fields into the flow's packet segment
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: NULL VSI pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

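	/* Tunnel rules offset the flow type by ICE_FLTR_PTYPE_MAX, so
	 * tunnel and non-tunnel profiles for the same flow type get
	 * distinct profile IDs.
	 */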
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}

	fdir->fdir_fltr_cnt_total = 0;
}

/**
 * ice_vc_fdir_has_prof_conflict - check for a flow profile conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check whether @conf has a conflicting flow profile with existing filters
 *
 * Return: true if a conflict is found, and false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

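		/* An IPv4/IPv6 *_OTHER profile overlaps the more specific
		 * UDP/TCP/SCTP profiles of the same IP version, so the two
		 * kinds are treated as conflicting in either direction.
		 */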
		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}

/**
 * ice_vc_fdir_write_flow_prof - write the flow profile into hardware
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

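	/* Add two flow entries for this profile: one against the VF's own
	 * data VSI and one against the control VSI that is used to program
	 * the filter.
	 */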
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set - config the input set for a filter
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value from the virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern - parse the virtchnl filter's pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count 0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
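				/* Reassemble the 8-bit Traffic Class from
				 * struct ipv6hdr, where it is split between
				 * the priority field (high nibble) and the
				 * top of flow_lbl[0] (low nibble).
				 */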
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action - parse the virtchnl filter's action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count 0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type 0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

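	/* Exactly one destination action (passthrough, drop, queue or
	 * queue region) and at most one mark action are accepted per rule.
	 */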
	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the rules match, and false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr - check for a duplicate filter rule
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate exists, and false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry - insert FDIR conf entry and allocate an ID
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into the list and allocate an ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the FDIR conf entry on success, and NULL on error.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

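	/* Hand the completed context off to the service task, which picks
	 * it up in ice_flush_fdir_ctx().
	 */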
	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post - post process for adding an FDIR filter
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director add command. On success, do the post
 * processing and send back a success msg by virtchnl. Otherwise, revert the
 * context and send back a failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
	vf->fdir.fdir_fltr_cnt_total++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post - post process for deleting an FDIR filter
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for the flow director del command. On success, do the post
 * processing and send back a success msg by virtchnl. Otherwise, revert the
 * context and send back a failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
	vf->fdir.fdir_fltr_cnt_total--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx - flush pending FDIR completion contexts
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
 */
1668 void ice_flush_fdir_ctx(struct ice_pf *pf)
1669 {
1670 	struct ice_vf *vf;
1671 	unsigned int bkt;
1672 
1673 	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1674 		return;
1675 
1676 	mutex_lock(&pf->vfs.table_lock);
1677 	ice_for_each_vf(pf, bkt, vf) {
1678 		struct device *dev = ice_pf_to_dev(pf);
1679 		enum virtchnl_fdir_prgm_status status;
1680 		struct ice_vf_fdir_ctx *ctx;
1681 		unsigned long flags;
1682 		int ret;
1683 
1684 		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1685 			continue;
1686 
1687 		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
1688 			continue;
1689 
1690 		ctx = &vf->fdir.ctx_done;
1691 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1692 		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
1693 			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1694 			continue;
1695 		}
1696 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1697 
1698 		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
1699 		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
1700 			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
1701 			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
1702 				vf->vf_id);
1703 			goto err_exit;
1704 		}
1705 
1706 		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
1707 		if (ret)
1708 			goto err_exit;
1709 
1710 		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1711 			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
1712 		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1713 			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
1714 		else
1715 			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1716 
1717 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1718 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1719 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1720 		continue;
1721 err_exit:
1722 		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
1723 			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
1724 		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
1725 			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
1726 		else
1727 			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
1728 
1729 		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
1730 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
1731 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
1732 	}
1733 	mutex_unlock(&pf->vfs.table_lock);
1734 }
1735 
1736 /**
1737  * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
1738  * @vf: pointer to the VF structure
1739  * @conf: FDIR configuration for each filter
1740  * @v_opcode: virtual channel operation code
1741  *
1742  * Return: 0 on success, and other on error.
1743  */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

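	/* Allow roughly 10 ms for the ctrl VSI IRQ to confirm the
	 * programming; on expiry ice_vf_fdir_timer marks the context
	 * ICE_FDIR_CTX_TIMEOUT and the reply is finished from
	 * ice_flush_fdir_ctx().
	 */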
	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative error code on failure.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);

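	/* Bound the rules a single VF may own so it cannot exhaust the
	 * flow director table shared by all VSIs.
	 */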
#define ICE_VF_MAX_FDIR_FILTERS	128
	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
			vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
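	/* From here on, rule-level failures keep v_ret at
	 * VIRTCHNL_STATUS_SUCCESS and report the cause in stat->status,
	 * letting the VF tell transport errors from rule rejections.
	 */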
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

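	/* The rule has already been validated above; a validate_only
	 * request stops here and reports the verdict without programming
	 * hardware.
	 */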
	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

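	/* On success the reply is deferred: ice_vc_add_fdir_fltr_post()
	 * sends it once the ctrl VSI IRQ (or the timeout timer) completes
	 * the context, so the success path below does not message the VF.
	 */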
	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative error code on failure.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when the ctrl_vsi index is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

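	/* As in the add path, a successful write defers the reply to
	 * ice_vc_del_fdir_fltr_post() via the IRQ/timeout completion.
	 */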
	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
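	/* Tear down in dependency order: drop the rules, then the IDR
	 * that indexed them, then remove the HW profiles before freeing
	 * their memory.
	 */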
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}