xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c (revision 0f9b4c3ca5fdf3e177266ef994071b1a03f07318)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2023, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_flow.h"
8 #include "ice_vf_lib_private.h"
9 
10 #define to_fltr_conf_from_desc(p) \
11 	container_of(p, struct virtchnl_fdir_fltr_conf, input)
12 
13 #define ICE_FLOW_PROF_TYPE_S	0
14 #define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
15 #define ICE_FLOW_PROF_VSI_S	32
16 #define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
17 
18 /* Flow profile ID format:
19  * [0:31] - flow type, flow + tun_offs
20  * [32:63] - VSI index
21  */
22 #define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
23 	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
24 	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
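
/* Editor's note -- a worked example of the packing above (values are
 * illustrative, not taken from the driver): with vsi == 5 and a
 * non-tunnel rule (tun_offs == 0), ICE_FLOW_PROF_FD() yields
 * ((u64)5 << 32) | flow, i.e. 0x0000000500000000ULL | flow, placing
 * flow + tun_offs in bits [0:31] and the VSI index in bits [32:63].
 * Tunnel rules pass tun_offs == ICE_FLTR_PTYPE_MAX (see
 * ice_vc_fdir_rem_prof() and ice_vc_fdir_write_flow_prof() below), so
 * tunnel and non-tunnel profiles of one flow type never collide.
 */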
25 
26 #define GTPU_TEID_OFFSET 4
27 #define GTPU_EH_QFI_OFFSET 1
28 #define GTPU_EH_QFI_MASK 0x3F
29 #define PFCP_S_OFFSET 0
30 #define PFCP_S_MASK 0x1
31 #define PFCP_PORT_NR 8805
32 
33 #define FDIR_INSET_FLAG_ESP_S 0
34 #define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
35 #define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
36 #define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
37 
38 enum ice_fdir_tunnel_type {
39 	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
40 	ICE_FDIR_TUNNEL_TYPE_GTPU,
41 	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
42 };
43 
44 struct virtchnl_fdir_fltr_conf {
45 	struct ice_fdir_fltr input;
46 	enum ice_fdir_tunnel_type ttype;
47 	u64 inset_flag;
48 	u32 flow_id;
49 };
50 
51 struct virtchnl_fdir_inset_map {
52 	enum virtchnl_proto_hdr_field field;
53 	enum ice_flow_field fld;
54 	u64 flag;
55 	u64 mask;
56 };
57 
58 static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
59 	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
60 	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
61 	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
62 	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
63 	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
64 	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
65 	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
66 	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
67 	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
68 	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
69 	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
70 	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
71 	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
72 	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
73 	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
74 	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
75 	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
76 	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
77 	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
78 	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
79 		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
80 	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
81 		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
82 	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
83 	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
84 	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
85 };
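
/* Editor's note: VIRTCHNL_PROTO_HDR_ESP_SPI intentionally appears twice
 * above. Which row applies is decided by conf->inset_flag, which
 * ice_vc_fdir_parse_pattern() sets to FDIR_INSET_FLAG_ESP_UDP for
 * UDP-encapsulated (NAT-T) ESP and to FDIR_INSET_FLAG_ESP_IPSEC for
 * plain ESP; the flag/mask pair in each row filters out the other
 * variant in ice_vc_fdir_parse_flow_fld().
 */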
86 
87 /**
88  * ice_vc_fdir_param_check - check FDIR request parameters
89  * @vf: pointer to the VF structure
90  * @vsi_id: VF relative VSI ID
91  *
92  * Check for a valid VSI ID, PF state, and VF state
93  *
94  * Return: 0 on success, and -EINVAL on error.
95  */
96 static int
97 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
98 {
99 	struct ice_pf *pf = vf->pf;
100 
101 	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
102 		return -EINVAL;
103 
104 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
105 		return -EINVAL;
106 
107 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
108 		return -EINVAL;
109 
110 	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
111 		return -EINVAL;
112 
113 	if (!ice_get_vf_vsi(vf))
114 		return -EINVAL;
115 
116 	return 0;
117 }
118 
119 /**
120  * ice_vf_start_ctrl_vsi - allocate and start the VF's control VSI
121  * @vf: pointer to the VF structure
122  *
123  * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
124  *
125  * Return: 0 on success, and other on error.
126  */
127 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
128 {
129 	struct ice_pf *pf = vf->pf;
130 	struct ice_vsi *ctrl_vsi;
131 	struct device *dev;
132 	int err;
133 
134 	dev = ice_pf_to_dev(pf);
135 	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
136 		return -EEXIST;
137 
138 	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
139 	if (!ctrl_vsi) {
140 		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
141 			vf->vf_id);
142 		return -ENOMEM;
143 	}
144 
145 	err = ice_vsi_open_ctrl(ctrl_vsi);
146 	if (err) {
147 		dev_dbg(dev, "Could not open control VSI for VF %d\n",
148 			vf->vf_id);
149 		goto err_vsi_open;
150 	}
151 
152 	return 0;
153 
154 err_vsi_open:
155 	ice_vsi_release(ctrl_vsi);
156 	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
157 		pf->vsi[vf->ctrl_vsi_idx] = NULL;
158 		vf->ctrl_vsi_idx = ICE_NO_VSI;
159 	}
160 	return err;
161 }
162 
163 /**
164  * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
165  * @vf: pointer to the VF structure
166  * @flow: filter flow type
167  *
168  * Return: 0 on success, and other on error.
169  */
170 static int
171 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
172 {
173 	struct ice_vf_fdir *fdir = &vf->fdir;
174 
175 	if (!fdir->fdir_prof) {
176 		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
177 					       ICE_FLTR_PTYPE_MAX,
178 					       sizeof(*fdir->fdir_prof),
179 					       GFP_KERNEL);
180 		if (!fdir->fdir_prof)
181 			return -ENOMEM;
182 	}
183 
184 	if (!fdir->fdir_prof[flow]) {
185 		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
186 						     sizeof(**fdir->fdir_prof),
187 						     GFP_KERNEL);
188 		if (!fdir->fdir_prof[flow])
189 			return -ENOMEM;
190 	}
191 
192 	return 0;
193 }
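
/* Editor's note: profile storage is a two-level lazy allocation -- an
 * ICE_FLTR_PTYPE_MAX-sized pointer array created on first use, plus one
 * struct ice_fd_hw_prof per flow type allocated when the first filter
 * of that type arrives. Both levels are devm-managed and are torn down
 * explicitly by ice_vc_fdir_free_prof_all().
 */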
194 
195 /**
196  * ice_vc_fdir_free_prof - free profile for this filter flow type
197  * @vf: pointer to the VF structure
198  * @flow: filter flow type
199  */
200 static void
201 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
202 {
203 	struct ice_vf_fdir *fdir = &vf->fdir;
204 
205 	if (!fdir->fdir_prof)
206 		return;
207 
208 	if (!fdir->fdir_prof[flow])
209 		return;
210 
211 	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
212 	fdir->fdir_prof[flow] = NULL;
213 }
214 
215 /**
216  * ice_vc_fdir_free_prof_all - free all the profile for this VF
217  * @vf: pointer to the VF structure
218  */
219 static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
220 {
221 	struct ice_vf_fdir *fdir = &vf->fdir;
222 	enum ice_fltr_ptype flow;
223 
224 	if (!fdir->fdir_prof)
225 		return;
226 
227 	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
228 		ice_vc_fdir_free_prof(vf, flow);
229 
230 	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
231 	fdir->fdir_prof = NULL;
232 }
233 
234 /**
235  * ice_vc_fdir_parse_flow_fld - parse virtchnl header into flow fields
236  * @proto_hdr: virtual channel protocol filter header
237  * @conf: FDIR configuration for each filter
238  * @fld: field type array
239  * @fld_cnt: field counter
240  *
241  * Parse the virtual channel filter header and store the fields in the field type array
242  *
243  * Return: 0 on success, and other on error.
244  */
245 static int
246 ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
247 			   struct virtchnl_fdir_fltr_conf *conf,
248 			   enum ice_flow_field *fld, int *fld_cnt)
249 {
250 	struct virtchnl_proto_hdr hdr;
251 	u32 i;
252 
253 	memcpy(&hdr, proto_hdr, sizeof(hdr));
254 
255 	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
256 	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
257 		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
258 			if (fdir_inset_map[i].mask &&
259 			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
260 			     fdir_inset_map[i].flag))
261 				continue;
262 
263 			fld[*fld_cnt] = fdir_inset_map[i].fld;
264 			*fld_cnt += 1;
265 			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
266 				return -EINVAL;
267 			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
268 						     fdir_inset_map[i].field);
269 		}
270 
271 	return 0;
272 }
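
/* Editor's sketch (not part of this file): how a VF driver might mark
 * the IPv4 source/destination addresses as its input set before sending
 * VIRTCHNL_OP_ADD_FDIR_FILTER. This assumes the proto-header helper
 * macros from include/linux/avf/virtchnl.h; the loop above then maps
 * each selector bit to an ICE_FLOW_FIELD_IDX_* value via
 * fdir_inset_map[].
 */
#if 0
	struct virtchnl_proto_hdrs hdrs = { .count = 1 };
	struct virtchnl_proto_hdr *hdr = &hdrs.proto_hdr[0];

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
#endif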
273 
274 /**
275  * ice_vc_fdir_set_flow_fld - set the flow's input set fields
276  * @vf: pointer to the VF structure
277  * @fltr: virtual channel add cmd buffer
278  * @conf: FDIR configuration for each filter
279  * @seg: array of one or more packet segments that describe the flow
280  *
281  * Parse the virtual channel add msg buffer's field vector and store the
282  * fields in the flow's packet segment
283  *
284  * Return: 0 on success, and other on error.
285  */
286 static int
287 ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
288 			 struct virtchnl_fdir_fltr_conf *conf,
289 			 struct ice_flow_seg_info *seg)
290 {
291 	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
292 	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
293 	struct device *dev = ice_pf_to_dev(vf->pf);
294 	struct virtchnl_proto_hdrs *proto;
295 	int fld_cnt = 0;
296 	int i;
297 
298 	proto = &rule->proto_hdrs;
299 	for (i = 0; i < proto->count; i++) {
300 		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
301 		int ret;
302 
303 		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
304 		if (ret)
305 			return ret;
306 	}
307 
308 	if (fld_cnt == 0) {
309 		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
310 		return -EINVAL;
311 	}
312 
313 	for (i = 0; i < fld_cnt; i++)
314 		ice_flow_set_fld(seg, fld[i],
315 				 ICE_FLOW_FLD_OFF_INVAL,
316 				 ICE_FLOW_FLD_OFF_INVAL,
317 				 ICE_FLOW_FLD_OFF_INVAL, false);
318 
319 	return 0;
320 }
321 
322 /**
323  * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
324  * @vf: pointer to the VF structure
325  * @conf: FDIR configuration for each filter
326  * @seg: array of one or more packet segments that describe the flow
327  *
328  * Return: 0 on success, and other on error.
329  */
330 static int
331 ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
332 			 struct virtchnl_fdir_fltr_conf *conf,
333 			 struct ice_flow_seg_info *seg)
334 {
335 	enum ice_fltr_ptype flow = conf->input.flow_type;
336 	enum ice_fdir_tunnel_type ttype = conf->ttype;
337 	struct device *dev = ice_pf_to_dev(vf->pf);
338 
339 	switch (flow) {
340 	case ICE_FLTR_PTYPE_NON_IP_L2:
341 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
342 		break;
343 	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
344 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
345 				  ICE_FLOW_SEG_HDR_IPV4 |
346 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
347 		break;
348 	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
349 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
350 				  ICE_FLOW_SEG_HDR_IPV4 |
351 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
352 		break;
353 	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
354 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
355 				  ICE_FLOW_SEG_HDR_IPV4 |
356 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
357 		break;
358 	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
359 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
360 				  ICE_FLOW_SEG_HDR_IPV4 |
361 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
362 		break;
363 	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
364 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
365 				  ICE_FLOW_SEG_HDR_IPV4 |
366 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
367 		break;
368 	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
369 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
370 				  ICE_FLOW_SEG_HDR_IPV4 |
371 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
372 		break;
373 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
374 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
375 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
376 		break;
377 	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
378 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
379 				  ICE_FLOW_SEG_HDR_IPV4 |
380 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
381 		break;
382 	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
383 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
384 				  ICE_FLOW_SEG_HDR_IPV4 |
385 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
386 		break;
387 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
388 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
389 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
390 	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
391 		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
392 			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
393 					  ICE_FLOW_SEG_HDR_IPV4 |
394 					  ICE_FLOW_SEG_HDR_IPV_OTHER);
395 		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
396 			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
397 					  ICE_FLOW_SEG_HDR_GTPU_IP |
398 					  ICE_FLOW_SEG_HDR_IPV4 |
399 					  ICE_FLOW_SEG_HDR_IPV_OTHER);
400 		} else {
401 			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
402 				ttype, vf->vf_id);
403 			return -EINVAL;
404 		}
405 		break;
406 	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
407 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
408 				  ICE_FLOW_SEG_HDR_IPV4 |
409 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
410 		break;
411 	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
412 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
413 				  ICE_FLOW_SEG_HDR_IPV6 |
414 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
415 		break;
416 	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
417 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
418 				  ICE_FLOW_SEG_HDR_IPV6 |
419 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
420 		break;
421 	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
422 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
423 				  ICE_FLOW_SEG_HDR_IPV6 |
424 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
425 		break;
426 	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
427 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
428 				  ICE_FLOW_SEG_HDR_IPV6 |
429 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
430 		break;
431 	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
432 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
433 				  ICE_FLOW_SEG_HDR_IPV6 |
434 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
435 		break;
436 	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
437 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
438 				  ICE_FLOW_SEG_HDR_IPV6 |
439 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
440 		break;
441 	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
442 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
443 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
444 		break;
445 	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
446 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
447 				  ICE_FLOW_SEG_HDR_IPV6 |
448 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
449 		break;
450 	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
451 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
452 				  ICE_FLOW_SEG_HDR_IPV6 |
453 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
454 		break;
455 	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
456 		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
457 				  ICE_FLOW_SEG_HDR_IPV6 |
458 				  ICE_FLOW_SEG_HDR_IPV_OTHER);
459 		break;
460 	default:
461 		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
462 			flow, vf->vf_id);
463 		return -EINVAL;
464 	}
465 
466 	return 0;
467 }
468 
469 /**
470  * ice_vc_fdir_rem_prof - remove profile for this filter flow type
471  * @vf: pointer to the VF structure
472  * @flow: filter flow type
473  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
474  */
475 static void
476 ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
477 {
478 	struct ice_vf_fdir *fdir = &vf->fdir;
479 	struct ice_fd_hw_prof *vf_prof;
480 	struct ice_pf *pf = vf->pf;
481 	struct ice_vsi *vf_vsi;
482 	struct device *dev;
483 	struct ice_hw *hw;
484 	u64 prof_id;
485 	int i;
486 
487 	dev = ice_pf_to_dev(pf);
488 	hw = &pf->hw;
489 	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
490 		return;
491 
492 	vf_prof = fdir->fdir_prof[flow];
493 
494 	vf_vsi = ice_get_vf_vsi(vf);
495 	if (!vf_vsi) {
496 		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
497 		return;
498 	}
499 
500 	if (!fdir->prof_entry_cnt[flow][tun])
501 		return;
502 
503 	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
504 				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
505 
506 	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
507 		if (vf_prof->entry_h[i][tun]) {
508 			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
509 
510 			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
511 			ice_flow_rem_entry(hw, ICE_BLK_FD,
512 					   vf_prof->entry_h[i][tun]);
513 			vf_prof->entry_h[i][tun] = 0;
514 		}
515 
516 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
517 	devm_kfree(dev, vf_prof->fdir_seg[tun]);
518 	vf_prof->fdir_seg[tun] = NULL;
519 
520 	for (i = 0; i < vf_prof->cnt; i++)
521 		vf_prof->vsi_h[i] = 0;
522 
523 	fdir->prof_entry_cnt[flow][tun] = 0;
524 }
525 
526 /**
527  * ice_vc_fdir_rem_prof_all - remove profile for this VF
528  * @vf: pointer to the VF structure
529  */
530 static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
531 {
532 	enum ice_fltr_ptype flow;
533 
534 	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
535 	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
536 		ice_vc_fdir_rem_prof(vf, flow, 0);
537 		ice_vc_fdir_rem_prof(vf, flow, 1);
538 	}
539 }
540 
541 /**
542  * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
543  * @fdir: pointer to the VF FDIR structure
544  */
545 static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
546 {
547 	enum ice_fltr_ptype flow;
548 
549 	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
550 	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
551 		fdir->fdir_fltr_cnt[flow][0] = 0;
552 		fdir->fdir_fltr_cnt[flow][1] = 0;
553 	}
554 
555 	fdir->fdir_fltr_cnt_total = 0;
556 }
557 
558 /**
559  * ice_vc_fdir_has_prof_conflict - check for a flow profile conflict
560  * @vf: pointer to the VF structure
561  * @conf: FDIR configuration for each filter
562  *
563  * Check if @conf conflicts with an existing filter profile
564  *
565  * Return: true if a conflict exists, false otherwise.
566  */
567 static bool
568 ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
569 			      struct virtchnl_fdir_fltr_conf *conf)
570 {
571 	struct ice_fdir_fltr *desc;
572 
573 	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
574 		struct virtchnl_fdir_fltr_conf *existing_conf;
575 		enum ice_fltr_ptype flow_type_a, flow_type_b;
576 		struct ice_fdir_fltr *a, *b;
577 
578 		existing_conf = to_fltr_conf_from_desc(desc);
579 		a = &existing_conf->input;
580 		b = &conf->input;
581 		flow_type_a = a->flow_type;
582 		flow_type_b = b->flow_type;
583 
584 		/* No need to compare two rules with different tunnel types or
585 		 * with the same protocol type.
586 		 */
587 		if (existing_conf->ttype != conf->ttype ||
588 		    flow_type_a == flow_type_b)
589 			continue;
590 
591 		switch (flow_type_a) {
592 		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
593 		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
594 		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
595 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
596 				return true;
597 			break;
598 		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
599 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
600 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
601 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
602 				return true;
603 			break;
604 		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
605 		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
606 		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
607 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
608 				return true;
609 			break;
610 		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
611 			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
612 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
613 			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
614 				return true;
615 			break;
616 		default:
617 			break;
618 		}
619 	}
620 
621 	return false;
622 }
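
/* Editor's note: the conflict rejected above is between an L4-specific
 * profile and the "OTHER" profile of the same IP version. For example,
 * an existing IPV4_UDP rule and a new IPV4_OTHER rule would both have
 * to classify plain IPv4/UDP packets, so only one such profile may
 * exist at a time.
 */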
623 
624 /**
625  * ice_vc_fdir_write_flow_prof - write the flow profile to hardware
626  * @vf: pointer to the VF structure
627  * @flow: filter flow type
628  * @seg: array of one or more packet segments that describe the flow
629  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
630  *
631  * Write the flow's profile config and packet segment into the hardware
632  *
633  * Return: 0 on success, and other on error.
634  */
635 static int
636 ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
637 			    struct ice_flow_seg_info *seg, int tun)
638 {
639 	struct ice_vf_fdir *fdir = &vf->fdir;
640 	struct ice_vsi *vf_vsi, *ctrl_vsi;
641 	struct ice_flow_seg_info *old_seg;
642 	struct ice_flow_prof *prof = NULL;
643 	struct ice_fd_hw_prof *vf_prof;
644 	struct device *dev;
645 	struct ice_pf *pf;
646 	struct ice_hw *hw;
647 	u64 entry1_h = 0;
648 	u64 entry2_h = 0;
649 	u64 prof_id;
650 	int ret;
651 
652 	pf = vf->pf;
653 	dev = ice_pf_to_dev(pf);
654 	hw = &pf->hw;
655 	vf_vsi = ice_get_vf_vsi(vf);
656 	if (!vf_vsi)
657 		return -EINVAL;
658 
659 	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
660 	if (!ctrl_vsi)
661 		return -EINVAL;
662 
663 	vf_prof = fdir->fdir_prof[flow];
664 	old_seg = vf_prof->fdir_seg[tun];
665 	if (old_seg) {
666 		if (!memcmp(old_seg, seg, sizeof(*seg))) {
667 			dev_dbg(dev, "Duplicated profile for VF %d!\n",
668 				vf->vf_id);
669 			return -EEXIST;
670 		}
671 
672 		if (fdir->fdir_fltr_cnt[flow][tun]) {
673 			ret = -EINVAL;
674 			dev_dbg(dev, "Input set conflicts for VF %d\n",
675 				vf->vf_id);
676 			goto err_exit;
677 		}
678 
679 		/* remove previously allocated profile */
680 		ice_vc_fdir_rem_prof(vf, flow, tun);
681 	}
682 
683 	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
684 				   tun ? ICE_FLTR_PTYPE_MAX : 0);
685 
686 	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
687 				tun + 1, &prof);
688 	if (ret) {
689 		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
690 			flow, vf->vf_id);
691 		goto err_exit;
692 	}
693 
694 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
695 				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
696 				 seg, &entry1_h);
697 	if (ret) {
698 		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
699 			flow, vf->vf_id);
700 		goto err_prof;
701 	}
702 
703 	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
704 				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
705 				 seg, &entry2_h);
706 	if (ret) {
707 		dev_dbg(dev,
708 			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
709 			flow, vf->vf_id);
710 		goto err_entry_1;
711 	}
712 
713 	vf_prof->fdir_seg[tun] = seg;
714 	vf_prof->cnt = 0;
715 	fdir->prof_entry_cnt[flow][tun] = 0;
716 
717 	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
718 	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
719 	vf_prof->cnt++;
720 	fdir->prof_entry_cnt[flow][tun]++;
721 
722 	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
723 	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
724 	vf_prof->cnt++;
725 	fdir->prof_entry_cnt[flow][tun]++;
726 
727 	return 0;
728 
729 err_entry_1:
730 	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
731 			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
732 	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
733 err_prof:
734 	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
735 err_exit:
736 	return ret;
737 }
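
/* Editor's note: each profile is given two flow entries -- one tied to
 * the VF's data VSI (where matched packets are steered) and one tied to
 * the VF's control VSI (used to program FDIR rules). Both handles are
 * recorded in vf_prof and prof_entry_cnt so ice_vc_fdir_rem_prof() can
 * unwind them later.
 */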
738 
739 /**
740  * ice_vc_fdir_config_input_set - configure the filter's input set
741  * @vf: pointer to the VF structure
742  * @fltr: virtual channel add cmd buffer
743  * @conf: FDIR configuration for each filter
744  * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
745  *
746  * Configure the input set type and value from the virtual channel add msg buffer
747  *
748  * Return: 0 on success, and other on error.
749  */
750 static int
751 ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
752 			     struct virtchnl_fdir_fltr_conf *conf, int tun)
753 {
754 	struct ice_fdir_fltr *input = &conf->input;
755 	struct device *dev = ice_pf_to_dev(vf->pf);
756 	struct ice_flow_seg_info *seg;
757 	enum ice_fltr_ptype flow;
758 	int ret;
759 
760 	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
761 	if (ret) {
762 		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
763 			vf->vf_id);
764 		return ret;
765 	}
766 
767 	flow = input->flow_type;
768 	ret = ice_vc_fdir_alloc_prof(vf, flow);
769 	if (ret) {
770 		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
771 		return ret;
772 	}
773 
774 	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
775 	if (!seg)
776 		return -ENOMEM;
777 
778 	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
779 	if (ret) {
780 		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
781 		goto err_exit;
782 	}
783 
784 	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
785 	if (ret) {
786 		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
787 		goto err_exit;
788 	}
789 
790 	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
791 	if (ret == -EEXIST) {
792 		devm_kfree(dev, seg);
793 	} else if (ret) {
794 		dev_dbg(dev, "Write flow profile for VF %d failed\n",
795 			vf->vf_id);
796 		goto err_exit;
797 	}
798 
799 	return 0;
800 
801 err_exit:
802 	devm_kfree(dev, seg);
803 	return ret;
804 }
805 
806 /**
807  * ice_vc_fdir_parse_pattern - parse the virtchnl filter pattern
808  * @vf: pointer to the VF info
809  * @fltr: virtual channel add cmd buffer
810  * @conf: FDIR configuration for each filter
811  *
812  * Parse the virtual channel filter's pattern and store it into conf
813  *
814  * Return: 0 on success, and other on error.
815  */
816 static int
817 ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
818 			  struct virtchnl_fdir_fltr_conf *conf)
819 {
820 	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
821 	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
822 	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
823 	struct device *dev = ice_pf_to_dev(vf->pf);
824 	struct ice_fdir_fltr *input = &conf->input;
825 	int i;
826 
827 	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
828 		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
829 			proto->count, vf->vf_id);
830 		return -EINVAL;
831 	}
832 
833 	for (i = 0; i < proto->count; i++) {
834 		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
835 		struct ip_esp_hdr *esph;
836 		struct ip_auth_hdr *ah;
837 		struct sctphdr *sctph;
838 		struct ipv6hdr *ip6h;
839 		struct udphdr *udph;
840 		struct tcphdr *tcph;
841 		struct ethhdr *eth;
842 		struct iphdr *iph;
843 		u8 s_field;
844 		u8 *rawh;
845 
846 		switch (hdr->type) {
847 		case VIRTCHNL_PROTO_HDR_ETH:
848 			eth = (struct ethhdr *)hdr->buffer;
849 			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
850 
851 			if (hdr->field_selector)
852 				input->ext_data.ether_type = eth->h_proto;
853 			break;
854 		case VIRTCHNL_PROTO_HDR_IPV4:
855 			iph = (struct iphdr *)hdr->buffer;
856 			l3 = VIRTCHNL_PROTO_HDR_IPV4;
857 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
858 
859 			if (hdr->field_selector) {
860 				input->ip.v4.src_ip = iph->saddr;
861 				input->ip.v4.dst_ip = iph->daddr;
862 				input->ip.v4.tos = iph->tos;
863 				input->ip.v4.proto = iph->protocol;
864 			}
865 			break;
866 		case VIRTCHNL_PROTO_HDR_IPV6:
867 			ip6h = (struct ipv6hdr *)hdr->buffer;
868 			l3 = VIRTCHNL_PROTO_HDR_IPV6;
869 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
870 
871 			if (hdr->field_selector) {
872 				memcpy(input->ip.v6.src_ip,
873 				       ip6h->saddr.in6_u.u6_addr8,
874 				       sizeof(ip6h->saddr));
875 				memcpy(input->ip.v6.dst_ip,
876 				       ip6h->daddr.in6_u.u6_addr8,
877 				       sizeof(ip6h->daddr));
878 				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
879 						  (ip6h->flow_lbl[0] >> 4);
880 				input->ip.v6.proto = ip6h->nexthdr;
881 			}
882 			break;
883 		case VIRTCHNL_PROTO_HDR_TCP:
884 			tcph = (struct tcphdr *)hdr->buffer;
885 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
886 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
887 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
888 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
889 
890 			if (hdr->field_selector) {
891 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
892 					input->ip.v4.src_port = tcph->source;
893 					input->ip.v4.dst_port = tcph->dest;
894 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
895 					input->ip.v6.src_port = tcph->source;
896 					input->ip.v6.dst_port = tcph->dest;
897 				}
898 			}
899 			break;
900 		case VIRTCHNL_PROTO_HDR_UDP:
901 			udph = (struct udphdr *)hdr->buffer;
902 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
903 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
904 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
905 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
906 
907 			if (hdr->field_selector) {
908 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
909 					input->ip.v4.src_port = udph->source;
910 					input->ip.v4.dst_port = udph->dest;
911 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
912 					input->ip.v6.src_port = udph->source;
913 					input->ip.v6.dst_port = udph->dest;
914 				}
915 			}
916 			break;
917 		case VIRTCHNL_PROTO_HDR_SCTP:
918 			sctph = (struct sctphdr *)hdr->buffer;
919 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
920 				input->flow_type =
921 					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
922 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
923 				input->flow_type =
924 					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
925 
926 			if (hdr->field_selector) {
927 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
928 					input->ip.v4.src_port = sctph->source;
929 					input->ip.v4.dst_port = sctph->dest;
930 				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
931 					input->ip.v6.src_port = sctph->source;
932 					input->ip.v6.dst_port = sctph->dest;
933 				}
934 			}
935 			break;
936 		case VIRTCHNL_PROTO_HDR_L2TPV3:
937 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
938 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
939 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
940 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
941 
942 			if (hdr->field_selector)
943 				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
944 			break;
945 		case VIRTCHNL_PROTO_HDR_ESP:
946 			esph = (struct ip_esp_hdr *)hdr->buffer;
947 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
948 			    l4 == VIRTCHNL_PROTO_HDR_UDP)
949 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
950 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
951 				 l4 == VIRTCHNL_PROTO_HDR_UDP)
952 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
953 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
954 				 l4 == VIRTCHNL_PROTO_HDR_NONE)
955 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
956 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
957 				 l4 == VIRTCHNL_PROTO_HDR_NONE)
958 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
959 
960 			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
961 				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
962 			else
963 				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
964 
965 			if (hdr->field_selector) {
966 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
967 					input->ip.v4.sec_parm_idx = esph->spi;
968 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
969 					input->ip.v6.sec_parm_idx = esph->spi;
970 			}
971 			break;
972 		case VIRTCHNL_PROTO_HDR_AH:
973 			ah = (struct ip_auth_hdr *)hdr->buffer;
974 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
975 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
976 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
977 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
978 
979 			if (hdr->field_selector) {
980 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
981 					input->ip.v4.sec_parm_idx = ah->spi;
982 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
983 					input->ip.v6.sec_parm_idx = ah->spi;
984 			}
985 			break;
986 		case VIRTCHNL_PROTO_HDR_PFCP:
987 			rawh = (u8 *)hdr->buffer;
988 			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
989 			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
990 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
991 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
992 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
993 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
994 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
995 			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
996 				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
997 
998 			if (hdr->field_selector) {
999 				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1000 					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
1001 				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1002 					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
1003 			}
1004 			break;
1005 		case VIRTCHNL_PROTO_HDR_GTPU_IP:
1006 			rawh = (u8 *)hdr->buffer;
1007 			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1008 
1009 			if (hdr->field_selector)
1010 				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
1011 			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
1012 			break;
1013 		case VIRTCHNL_PROTO_HDR_GTPU_EH:
1014 			rawh = (u8 *)hdr->buffer;
1015 
1016 			if (hdr->field_selector)
1017 				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1018 			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1019 			break;
1020 		default:
1021 			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
1022 				hdr->type, vf->vf_id);
1023 			return -EINVAL;
1024 		}
1025 	}
1026 
1027 	return 0;
1028 }
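
/* Editor's sketch (hypothetical VF-side buffer, not part of this file):
 * a minimal IPv4/UDP pattern as the parser above expects it --
 * proto_hdr[0] carries the L3 header, proto_hdr[1] the L4 header, and
 * field values sit in hdr->buffer in network byte order, exactly as the
 * casts above read them. field_selector must also be set for the values
 * to be used (see VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT()).
 */
#if 0
	struct virtchnl_proto_hdrs proto = { .count = 2 };
	struct iphdr *iph = (struct iphdr *)proto.proto_hdr[0].buffer;
	struct udphdr *udph = (struct udphdr *)proto.proto_hdr[1].buffer;

	proto.proto_hdr[0].type = VIRTCHNL_PROTO_HDR_IPV4;
	iph->daddr = cpu_to_be32(0xc0a80001);	/* 192.168.0.1 */
	proto.proto_hdr[1].type = VIRTCHNL_PROTO_HDR_UDP;
	udph->dest = cpu_to_be16(4789);
#endif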
1029 
1030 /**
1031  * ice_vc_fdir_parse_action - parse the virtchnl filter actions
1032  * @vf: pointer to the VF info
1033  * @fltr: virtual channel add cmd buffer
1034  * @conf: FDIR configuration for each filter
1035  *
1036  * Parse the virtual channel filter's action and store it into conf
1037  *
1038  * Return: 0 on success, and other on error.
1039  */
1040 static int
1041 ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1042 			 struct virtchnl_fdir_fltr_conf *conf)
1043 {
1044 	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
1045 	struct device *dev = ice_pf_to_dev(vf->pf);
1046 	struct ice_fdir_fltr *input = &conf->input;
1047 	u32 dest_num = 0;
1048 	u32 mark_num = 0;
1049 	int i;
1050 
1051 	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
1052 		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
1053 			as->count, vf->vf_id);
1054 		return -EINVAL;
1055 	}
1056 
1057 	for (i = 0; i < as->count; i++) {
1058 		struct virtchnl_filter_action *action = &as->actions[i];
1059 
1060 		switch (action->type) {
1061 		case VIRTCHNL_ACTION_PASSTHRU:
1062 			dest_num++;
1063 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1064 			break;
1065 		case VIRTCHNL_ACTION_DROP:
1066 			dest_num++;
1067 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1068 			break;
1069 		case VIRTCHNL_ACTION_QUEUE:
1070 			dest_num++;
1071 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1072 			input->q_index = action->act_conf.queue.index;
1073 			break;
1074 		case VIRTCHNL_ACTION_Q_REGION:
1075 			dest_num++;
1076 			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1077 			input->q_index = action->act_conf.queue.index;
1078 			input->q_region = action->act_conf.queue.region;
1079 			break;
1080 		case VIRTCHNL_ACTION_MARK:
1081 			mark_num++;
1082 			input->fltr_id = action->act_conf.mark_id;
1083 			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1084 			break;
1085 		default:
1086 			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
1087 				action->type, vf->vf_id);
1088 			return -EINVAL;
1089 		}
1090 	}
1091 
1092 	if (dest_num == 0 || dest_num >= 2) {
1093 		dev_dbg(dev, "Invalid destination action for VF %d\n",
1094 			vf->vf_id);
1095 		return -EINVAL;
1096 	}
1097 
1098 	if (mark_num >= 2) {
1099 		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
1100 		return -EINVAL;
1101 	}
1102 
1103 	return 0;
1104 }
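
/* Editor's sketch (hypothetical VF-side buffer): an action set that the
 * parser above accepts -- exactly one destination action and at most
 * one mark action. Here: steer matches to queue 3 and report FDIR ID
 * 0x1234 in the Rx descriptor.
 */
#if 0
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;

	as->count = 2;
	as->actions[0].type = VIRTCHNL_ACTION_QUEUE;
	as->actions[0].act_conf.queue.index = 3;
	as->actions[1].type = VIRTCHNL_ACTION_MARK;
	as->actions[1].act_conf.mark_id = 0x1234;
#endif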
1105 
1106 /**
1107  * ice_vc_validate_fdir_fltr - validate the virtual channel filter
1108  * @vf: pointer to the VF info
1109  * @fltr: virtual channel add cmd buffer
1110  * @conf: FDIR configuration for each filter
1111  *
1112  * Return: 0 on success, and other on error.
1113  */
1114 static int
1115 ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1116 			  struct virtchnl_fdir_fltr_conf *conf)
1117 {
1118 	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1119 	int ret;
1120 
1121 	if (!ice_vc_validate_pattern(vf, proto))
1122 		return -EINVAL;
1123 
1124 	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
1125 	if (ret)
1126 		return ret;
1127 
1128 	return ice_vc_fdir_parse_action(vf, fltr, conf);
1129 }
1130 
1131 /**
1132  * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
1133  * @conf_a: FDIR configuration for filter a
1134  * @conf_b: FDIR configuration for filter b
1135  *
1136  * Return: true if the two rules have the same value, false otherwise.
1137  */
1138 static bool
1139 ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
1140 		       struct virtchnl_fdir_fltr_conf *conf_b)
1141 {
1142 	struct ice_fdir_fltr *a = &conf_a->input;
1143 	struct ice_fdir_fltr *b = &conf_b->input;
1144 
1145 	if (conf_a->ttype != conf_b->ttype)
1146 		return false;
1147 	if (a->flow_type != b->flow_type)
1148 		return false;
1149 	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
1150 		return false;
1151 	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
1152 		return false;
1153 	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
1154 		return false;
1155 	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
1156 		return false;
1157 	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
1158 		return false;
1159 	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
1160 		return false;
1161 	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
1162 		return false;
1163 	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
1164 		return false;
1165 
1166 	return true;
1167 }
1168 
1169 /**
1170  * ice_vc_fdir_is_dup_fltr - check for a duplicate filter rule
1171  * @vf: pointer to the VF info
1172  * @conf: FDIR configuration for each filter
1173  *
1174  * Check if there is a duplicated rule with the same conf value
1175  *
1176  * Return: true if a duplicate rule exists, false otherwise.
1177  */
1178 static bool
1179 ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
1180 {
1181 	struct ice_fdir_fltr *desc;
1182 	bool ret;
1183 
1184 	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
1185 		struct virtchnl_fdir_fltr_conf *node =
1186 				to_fltr_conf_from_desc(desc);
1187 
1188 		ret = ice_vc_fdir_comp_rules(node, conf);
1189 		if (ret)
1190 			return true;
1191 	}
1192 
1193 	return false;
1194 }
1195 
1196 /**
1197  * ice_vc_fdir_insert_entry - insert FDIR conf entry and allocate its ID
1198  * @vf: pointer to the VF info
1199  * @conf: FDIR configuration for each filter
1200  * @id: pointer to ID value allocated by driver
1201  *
1202  * Insert FDIR conf entry into list and allocate ID for this filter
1203  *
1204  * Return: 0 on success, and other on error.
1205  */
1206 static int
1207 ice_vc_fdir_insert_entry(struct ice_vf *vf,
1208 			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
1209 {
1210 	struct ice_fdir_fltr *input = &conf->input;
1211 	int i;
1212 
1213 	/* alloc ID corresponding with conf */
1214 	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
1215 		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
1216 	if (i < 0)
1217 		return -EINVAL;
1218 	*id = i;
1219 
1220 	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
1221 	return 0;
1222 }
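
/* Editor's note: idr_alloc() hands out the flow_id that is later
 * reported back to the VF. It returns the lowest free ID in
 * [0, ICE_FDIR_MAX_FLTRS) or a negative errno, so IDs are recycled
 * after deletion and the rule count is naturally capped.
 */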
1223 
1224 /**
1225  * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1226  * @vf: pointer to the VF info
1227  * @conf: FDIR configuration for each filter
1228  * @id: filter rule's ID
1229  */
1230 static void
1231 ice_vc_fdir_remove_entry(struct ice_vf *vf,
1232 			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1233 {
1234 	struct ice_fdir_fltr *input = &conf->input;
1235 
1236 	idr_remove(&vf->fdir.fdir_rule_idr, id);
1237 	list_del(&input->fltr_node);
1238 }
1239 
1240 /**
1241  * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
1242  * @vf: pointer to the VF info
1243  * @id: filter rule's ID
1244  *
1245  * Return: pointer to the filter configuration on success, NULL otherwise.
1246  */
1247 static struct virtchnl_fdir_fltr_conf *
1248 ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
1249 {
1250 	return idr_find(&vf->fdir.fdir_rule_idr, id);
1251 }
1252 
1253 /**
1254  * ice_vc_fdir_flush_entry - remove all FDIR conf entry
1255  * @vf: pointer to the VF info
1256  */
1257 static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
1258 {
1259 	struct virtchnl_fdir_fltr_conf *conf;
1260 	struct ice_fdir_fltr *desc, *temp;
1261 
1262 	list_for_each_entry_safe(desc, temp,
1263 				 &vf->fdir.fdir_rule_list, fltr_node) {
1264 		conf = to_fltr_conf_from_desc(desc);
1265 		list_del(&desc->fltr_node);
1266 		devm_kfree(ice_pf_to_dev(vf->pf), conf);
1267 	}
1268 }
1269 
1270 /**
1271  * ice_vc_fdir_write_fltr - write filter rule into hardware
1272  * @vf: pointer to the VF info
1273  * @conf: FDIR configuration for each filter
1274  * @add: true implies add rule, false implies del rule
1275  * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
1276  *
1277  * Return: 0 on success, and other on error.
1278  */
1279 static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
1280 				  struct virtchnl_fdir_fltr_conf *conf,
1281 				  bool add, bool is_tun)
1282 {
1283 	struct ice_fdir_fltr *input = &conf->input;
1284 	struct ice_vsi *vsi, *ctrl_vsi;
1285 	struct ice_fltr_desc desc;
1286 	struct device *dev;
1287 	struct ice_pf *pf;
1288 	struct ice_hw *hw;
1289 	int ret;
1290 	u8 *pkt;
1291 
1292 	pf = vf->pf;
1293 	dev = ice_pf_to_dev(pf);
1294 	hw = &pf->hw;
1295 	vsi = ice_get_vf_vsi(vf);
1296 	if (!vsi) {
1297 		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
1298 		return -EINVAL;
1299 	}
1300 
1301 	input->dest_vsi = vsi->idx;
1302 	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1303 
1304 	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1305 	if (!ctrl_vsi) {
1306 		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
1307 		return -EINVAL;
1308 	}
1309 
1310 	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1311 	if (!pkt)
1312 		return -ENOMEM;
1313 
1314 	ice_fdir_get_prgm_desc(hw, input, &desc, add);
1315 	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
1316 	if (ret) {
1317 		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
1318 			vf->vf_id, input->flow_type);
1319 		goto err_free_pkt;
1320 	}
1321 
1322 	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
1323 	if (ret)
1324 		goto err_free_pkt;
1325 
1326 	return 0;
1327 
1328 err_free_pkt:
1329 	devm_kfree(dev, pkt);
1330 	return ret;
1331 }
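
/* Editor's note: rules are programmed by queueing a programming
 * descriptor plus a generated "training" packet that matches the rule
 * on the control VSI's Tx queue; the result arrives asynchronously on
 * the control VSI's Rx queue (see ice_vc_fdir_irq_handler() below).
 * The packet buffer is freed here only on failure because a successful
 * ice_prgm_fdir_fltr() appears to hand it off to the Tx ring.
 */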
1332 
1333 /**
1334  * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
1335  * @t: pointer to timer_list
1336  */
1337 static void ice_vf_fdir_timer(struct timer_list *t)
1338 {
1339 	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
1340 	struct ice_vf_fdir_ctx *ctx_done;
1341 	struct ice_vf_fdir *fdir;
1342 	unsigned long flags;
1343 	struct ice_vf *vf;
1344 	struct ice_pf *pf;
1345 
1346 	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
1347 	vf = container_of(fdir, struct ice_vf, fdir);
1348 	ctx_done = &fdir->ctx_done;
1349 	pf = vf->pf;
1350 	spin_lock_irqsave(&fdir->ctx_lock, flags);
1351 	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1352 		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1353 		WARN_ON_ONCE(1);
1354 		return;
1355 	}
1356 
1357 	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1358 
1359 	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1360 	ctx_done->conf = ctx_irq->conf;
1361 	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
1362 	ctx_done->v_opcode = ctx_irq->v_opcode;
1363 	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1364 
1365 	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1366 	ice_service_task_schedule(pf);
1367 }
1368 
1369 /**
1370  * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
1371  * @ctrl_vsi: pointer to a VF's CTRL VSI
1372  * @rx_desc: pointer to FDIR Rx queue descriptor
1373  */
1374 void
1375 ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
1376 			union ice_32b_rx_flex_desc *rx_desc)
1377 {
1378 	struct ice_pf *pf = ctrl_vsi->back;
1379 	struct ice_vf *vf = ctrl_vsi->vf;
1380 	struct ice_vf_fdir_ctx *ctx_done;
1381 	struct ice_vf_fdir_ctx *ctx_irq;
1382 	struct ice_vf_fdir *fdir;
1383 	unsigned long flags;
1384 	struct device *dev;
1385 	int ret;
1386 
1387 	if (WARN_ON(!vf))
1388 		return;
1389 
1390 	fdir = &vf->fdir;
1391 	ctx_done = &fdir->ctx_done;
1392 	ctx_irq = &fdir->ctx_irq;
1393 	dev = ice_pf_to_dev(pf);
1394 	spin_lock_irqsave(&fdir->ctx_lock, flags);
1395 	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
1396 		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1397 		WARN_ON_ONCE(1);
1398 		return;
1399 	}
1400 
1401 	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
1402 
1403 	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
1404 	ctx_done->conf = ctx_irq->conf;
1405 	ctx_done->stat = ICE_FDIR_CTX_IRQ;
1406 	ctx_done->v_opcode = ctx_irq->v_opcode;
1407 	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
1408 	spin_unlock_irqrestore(&fdir->ctx_lock, flags);
1409 
1410 	ret = del_timer(&ctx_irq->rx_tmr);
1411 	if (!ret)
1412 		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
1413 
1414 	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
1415 	ice_service_task_schedule(pf);
1416 }
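
/* Editor's note on the completion flow: ice_vc_fdir_set_irq_ctx() (see
 * below) arms ctx_irq and a 10 ms timer before a rule is written.
 * Either this IRQ handler (programming status arrived) or
 * ice_vf_fdir_timer() (timeout) then moves the context to ctx_done
 * under ctx_lock and schedules the service task, which calls
 * ice_flush_fdir_ctx() to post-process and send the virtchnl reply.
 * Only one request per VF may be in flight at a time.
 */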
1417 
1418 /**
1419  * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
1420  * @vf: pointer to the VF info
1421  */
1422 static void ice_vf_fdir_dump_info(struct ice_vf *vf)
1423 {
1424 	u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
1425 	struct ice_vsi *vf_vsi;
1426 	struct device *dev;
1427 	struct ice_pf *pf;
1428 	struct ice_hw *hw;
1429 	u16 vsi_num;
1430 
1431 	pf = vf->pf;
1432 	hw = &pf->hw;
1433 	dev = ice_pf_to_dev(pf);
1434 	vf_vsi = ice_get_vf_vsi(vf);
1435 	if (!vf_vsi) {
1436 		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
1437 		return;
1438 	}
1439 
1440 	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
1441 
1442 	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
1443 	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
1444 	switch (hw->mac_type) {
1445 	case ICE_MAC_E830:
1446 		fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1447 		fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1448 		fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1449 		fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1450 		break;
1451 	case ICE_MAC_E810:
1452 	default:
1453 		fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
1454 		fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
1455 		fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
1456 		fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
1457 	}
1458 
1459 	dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1460 		vf->vf_id, fd_size_g, fd_size_b);
1461 	dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
1462 		vf->vf_id, fd_cnt_g, fd_cnt_b);
1463 }
1464 
1465 /**
1466  * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
1467  * @vf: pointer to the VF info
1468  * @ctx: FDIR context info for post processing
1469  * @status: virtchnl FDIR program status
1470  *
1471  * Return: 0 on success, and other on error.
1472  */
1473 static int
1474 ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1475 		      enum virtchnl_fdir_prgm_status *status)
1476 {
1477 	struct device *dev = ice_pf_to_dev(vf->pf);
1478 	u32 stat_err, error, prog_id;
1479 	int ret;
1480 
1481 	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
1482 	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
1483 	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
1484 		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1485 		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
1486 		ret = -EINVAL;
1487 		goto err_exit;
1488 	}
1489 
1490 	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
1491 		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
1492 	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
1493 	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
1494 		dev_err(dev, "VF %d: Desc shows add, but ctx does not\n",
1495 			vf->vf_id);
1496 		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1497 		ret = -EINVAL;
1498 		goto err_exit;
1499 	}
1500 
1501 	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
1502 	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
1503 		dev_err(dev, "VF %d: Desc shows del, but ctx does not\n",
1504 			vf->vf_id);
1505 		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
1506 		ret = -EINVAL;
1507 		goto err_exit;
1508 	}
1509 
1510 	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
1511 		ICE_FXD_FLTR_WB_QW1_FAIL_S;
1512 	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
1513 		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
1514 			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
1515 				vf->vf_id);
1516 			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1517 		} else {
1518 			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
1519 				vf->vf_id);
1520 			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1521 		}
1522 		ret = -EINVAL;
1523 		goto err_exit;
1524 	}
1525 
1526 	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
1527 		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
1528 	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
1529 		dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
1530 		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1531 		ret = -EINVAL;
1532 		goto err_exit;
1533 	}
1534 
1535 	*status = VIRTCHNL_FDIR_SUCCESS;
1536 
1537 	return 0;
1538 
1539 err_exit:
1540 	ice_vf_fdir_dump_info(vf);
1541 	return ret;
1542 }
1543 
1544 /**
1545  * ice_vc_add_fdir_fltr_post - post process the add filter command
1546  * @vf: pointer to the VF structure
1547  * @ctx: FDIR context info for post processing
1548  * @status: virtchnl FDIR program status
1549  * @success: true implies success, false implies failure
1550  *
1551  * Post process for flow director add command. If success, then do post process
1552  * and send back success msg by virtchnl. Otherwise, do context reversion and
1553  * send back failure msg by virtchnl.
1554  *
1555  * Return: 0 on success, and other on error.
1556  */
1557 static int
1558 ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1559 			  enum virtchnl_fdir_prgm_status status,
1560 			  bool success)
1561 {
1562 	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1563 	struct device *dev = ice_pf_to_dev(vf->pf);
1564 	enum virtchnl_status_code v_ret;
1565 	struct virtchnl_fdir_add *resp;
1566 	int ret, len, is_tun;
1567 
1568 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1569 	len = sizeof(*resp);
1570 	resp = kzalloc(len, GFP_KERNEL);
1571 	if (!resp) {
1572 		len = 0;
1573 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1574 		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1575 		goto err_exit;
1576 	}
1577 
1578 	if (!success)
1579 		goto err_exit;
1580 
1581 	is_tun = 0;
1582 	resp->status = status;
1583 	resp->flow_id = conf->flow_id;
1584 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
1585 	vf->fdir.fdir_fltr_cnt_total++;
1586 
1587 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1588 				    (u8 *)resp, len);
1589 	kfree(resp);
1590 
1591 	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1592 		vf->vf_id, conf->flow_id,
1593 		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1594 		"add" : "del");
1595 	return ret;
1596 
1597 err_exit:
1598 	if (resp)
1599 		resp->status = status;
1600 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1601 	devm_kfree(dev, conf);
1602 
1603 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1604 				    (u8 *)resp, len);
1605 	kfree(resp);
1606 	return ret;
1607 }
1608 
1609 /**
1610  * ice_vc_del_fdir_fltr_post - post process the del filter command
1611  * @vf: pointer to the VF structure
1612  * @ctx: FDIR context info for post processing
1613  * @status: virtchnl FDIR program status
1614  * @success: true implies success, false implies failure
1615  *
1616  * Post process for flow director del command. If success, then do post process
1617  * and send back success msg by virtchnl. Otherwise, do context reversion and
1618  * send back failure msg by virtchnl.
1619  *
1620  * Return: 0 on success, and other on error.
1621  */
1622 static int
1623 ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
1624 			  enum virtchnl_fdir_prgm_status status,
1625 			  bool success)
1626 {
1627 	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
1628 	struct device *dev = ice_pf_to_dev(vf->pf);
1629 	enum virtchnl_status_code v_ret;
1630 	struct virtchnl_fdir_del *resp;
1631 	int ret, len, is_tun;
1632 
1633 	v_ret = VIRTCHNL_STATUS_SUCCESS;
1634 	len = sizeof(*resp);
1635 	resp = kzalloc(len, GFP_KERNEL);
1636 	if (!resp) {
1637 		len = 0;
1638 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1639 		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
1640 		goto err_exit;
1641 	}
1642 
1643 	if (!success)
1644 		goto err_exit;
1645 
1646 	is_tun = 0;
1647 	resp->status = status;
1648 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
1649 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1650 	vf->fdir.fdir_fltr_cnt_total--;
1651 
1652 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1653 				    (u8 *)resp, len);
1654 	kfree(resp);
1655 
1656 	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
1657 		vf->vf_id, conf->flow_id,
1658 		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
1659 		"add" : "del");
1660 	devm_kfree(dev, conf);
1661 	return ret;
1662 
1663 err_exit:
1664 	if (resp)
1665 		resp->status = status;
1666 	if (success)
1667 		devm_kfree(dev, conf);
1668 
1669 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
1670 				    (u8 *)resp, len);
1671 	kfree(resp);
1672 	return ret;
1673 }
1674 
1675 /**
1676  * ice_flush_fdir_ctx - flush and process pending FDIR contexts
1677  * @pf: pointer to the PF structure
1678  *
1679  * Flush all pending events on the ctx_done list and process them.
1680  */
1681 void ice_flush_fdir_ctx(struct ice_pf *pf)
1682 {
1683 	struct ice_vf *vf;
1684 	unsigned int bkt;
1685 
1686 	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
1687 		return;
1688 
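	/* Walk every VF under the table lock and finalize any completed
	 * FDIR request the IRQ handler or timer has posted.
	 */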
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

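		/* By now the IRQ handler or the timer must have finalized
		 * the context, so it should no longer be in the READY state.
		 */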
		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
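		/* Completion failed or timed out: run the same post
		 * processing with success=false so the VF still gets a reply.
		 */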
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, -EBUSY if the previous request is still in progress.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

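	/* The context is claimed; it is safe to fill it outside the lock */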
	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

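	/* Give HW up to ~10 msec to return the programming status
	 * descriptor before the timer declares a timeout.
	 */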
	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);

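	/* Reject the request when the FD table is out of space or this VF
	 * already has its maximum number of rules, so one VF cannot exhaust
	 * the shared flow director resources.
	 */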
#define ICE_VF_MAX_FDIR_FILTERS	128
	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
			vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

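	/* The VF only asked for validation; reply without programming HW */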
	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

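	/* Success: the virtchnl reply is sent later from
	 * ice_vc_add_fdir_fltr_post() once the programming status arrives.
	 */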
exit:
	kfree(stat);
	return ret;

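	/* Unwind in the reverse order of setup */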
err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF from the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

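	/* As with add, the reply is deferred to ice_vc_del_fdir_fltr_post()
	 * once HW confirms the deletion.
	 */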
	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

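	/* Rule IDs are allocated from the IDR; the list keeps all rules
	 * for lookup and flush.
	 */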
	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

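	/* ctx_lock serializes ctx_irq/ctx_done between the virtchnl path
	 * and the IRQ/timer completion path.
	 */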
	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}