// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
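	/* Each matched field class below consumes exactly one lookup
	 * element. Callers size the lookup array from this count and
	 * cross-check it against what ice_tc_fill_rules() actually fills.
	 */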
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		     ICE_TC_FLWR_FIELD_ENC_IP_TTL))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* is CVLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_CVLAN)
		lkups_cnt++;

	/* are PPPoE options specified? */
	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO))
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}

static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

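	/* Unsupported L4 protocols are rejected while the filter is parsed,
	 * so this fallback is not expected to be reached.
	 */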
	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}

static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
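			/* The VNI/VSID is 24 bits wide and sits in the
			 * upper three bytes of the header field, hence the
			 * shift by 8 and the 3-byte mask below.
			 */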
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

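		/* GTP PDU session options: the PDU type occupies the upper
		 * nibble of its byte (hence the 0xf0 mask) and the QFI is a
		 * 6-bit field (hence the 0x3f mask).
		 */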
		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
			list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
			list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
		}

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
			list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
			list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
		}

		i++;
	}

	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
		struct ice_ipv6_hdr *hdr_h, *hdr_m;

		hdr_h = &list[i].h_u.ipv6_hdr;
		hdr_m = &list[i].m_u.ipv6_hdr;
		list[i].type = ice_proto_type_from_ipv6(false);

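		/* For IPv6, the "tos" value is the Traffic Class field,
		 * which lives inside the version/traffic-class/flow-label
		 * word, so it is merged in with a masked bitfield replace.
		 */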
		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
					   hdr->l3_key.tos,
					   ICE_IPV6_HDR_TC_MASK);
			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
					   hdr->l3_mask.tos,
					   ICE_IPV6_HDR_TC_MASK);
		}

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
			hdr_h->hop_limit = hdr->l3_key.ttl;
			hdr_m->hop_limit = hdr->l3_mask.ttl;
		}

		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill the ice_adv_lkup_elem list based on the TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	u16 vlan_tpid = 0;
	int i = 0;

	rule_info->vlan_type = vlan_tpid;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

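	/* From here on, a tunnel filter matches on the inner packet:
	 * "headers" and "inner" were redirected above, so the protocol type
	 * helpers pick the inner (IL) variants.
	 */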
	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
		rule_info->vlan_type =
				ice_check_supported_vlan_tpid(vlan_tpid);

		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
			list[i].type = ICE_VLAN_EX;
		else
			list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

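	/* In double VLAN (QinQ) mode the outer tag was matched above as
	 * ICE_VLAN_EX; the customer/inner tag is matched here as
	 * ICE_VLAN_IN.
	 */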
	if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
		list[i].type = ICE_VLAN_IN;
		list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
		struct ice_pppoe_hdr *vals, *masks;

		vals = &list[i].h_u.pppoe_hdr;
		masks = &list[i].m_u.pppoe_hdr;

		list[i].type = ICE_PPPOE;

		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
			vals->session_id = headers->pppoe_hdr.session_id;
			masks->session_id = cpu_to_be16(0xFFFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
		}

		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
		list[i].type = ice_proto_type_from_ipv4(inner);

		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
			list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
			list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
		}

		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
			list[i].h_u.ipv4_hdr.time_to_live =
				headers->l3_key.ttl;
			list[i].m_u.ipv4_hdr.time_to_live =
				headers->l3_mask.ttl;
		}

		i++;
	}

	if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
		struct ice_ipv6_hdr *hdr_h, *hdr_m;

		hdr_h = &list[i].h_u.ipv6_hdr;
		hdr_m = &list[i].m_u.ipv6_hdr;
		list[i].type = ice_proto_type_from_ipv6(inner);

		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
					   headers->l3_key.tos,
					   ICE_IPV6_HDR_TC_MASK);
			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
					   headers->l3_mask.tos,
					   ICE_IPV6_HDR_TC_MASK);
		}

		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
			hdr_h->hop_limit = headers->l3_key.ttl;
			hdr_m->hop_limit = headers->l3_mask.ttl;
		}

		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * Detect the appropriate tunnel_type if the specified device is a tunnel
 * device such as VXLAN/Geneve
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}
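
/* Illustrative example (not part of the driver): a VXLAN decap match that
 * exercises the tunnel detection above and the tunnel parse/fill paths in
 * this file could be installed with something like
 *
 *   tc filter add dev $VXLAN_DEV protocol ip ingress flower \
 *       enc_key_id 42 enc_dst_port 4789 dst_ip 192.168.1.1 \
 *       action mirred egress redirect dev $PF_DEV
 *
 * where the device names are placeholders.
 */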

static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe created as a result of the new extraction
	 * sequence based on the input set. Priority 7 is the max value for a
	 * switch recipe; the higher the number, the higher the rule's
	 * precedence in switch rule evaluation.
	 */
	rule_info.priority = 7;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_id = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the filter parameters, using advanced recipes
 * supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
		 * for PF ADQ filter, it is not yet set in tc_fltr,
		 * hence store the dest_vsi ptr in tc_fltr
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is VSI handle (sw handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}

819  * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
820  * @match: Pointer to flow match structure
821  * @fltr: Pointer to filter structure
822  * @headers: Pointer to outer header fields
823  * @returns PPP protocol used in filter (ppp_ses or ppp_disc)
824  */
static u16
ice_tc_set_pppoe(struct flow_match_pppoe *match,
		 struct ice_tc_flower_fltr *fltr,
		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match->mask->session_id) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
		headers->pppoe_hdr.session_id = match->key->session_id;
	}

	if (match->mask->ppp_proto) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
	}

	return be16_to_cpu(match->key->type);
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPv6 addresses should not be the LOOPBACK address
	 * (0:0:0:0:0:0:0:1), which can also be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* error if both src and dest IPv6 addresses are wildcard (any) */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel
 */
static void
ice_tc_set_tos_ttl(struct flow_match_ip *match,
		   struct ice_tc_flower_fltr *fltr,
		   struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   bool is_encap)
{
	if (match->mask->tos) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;

		headers->l3_key.tos = match->key->tos;
		headers->l3_mask.tos = match->mask->tos;
	}

	if (match->mask->ttl) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;

		headers->l3_key.ttl = match->key->ttl;
		headers->l3_mask.ttl = match->mask->ttl;
	}
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * therefore making GTP-U the default choice (when the destination port number
 * is not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
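			/* 2152 is the registered GTP-U UDP port; keep the
			 * TNL_GTPU default set by the caller.
			 */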
			break;
		case 2123:
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}

static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ice_tc_set_tos_ttl(&match, fltr, headers, true);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @vsi: Pointer to the VSI
 * @filter_dev: Pointer to device on which filter is being added
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; outer
		 * headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

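		/* Wildcard ethertypes are not programmed into the match, and
		 * GTP carries bare IP with no inner Ethernet header, so skip
		 * the ethertype lookup in those cases.
		 */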
		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
		if (match.mask->vlan_tpid)
			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		if (!ice_is_dvm_ena(&vsi->back->hw)) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
			return -EINVAL;
		}

		flow_rule_match_cvlan(rule, &match);

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack,
						   "Bad CVLAN mask");
				return -EINVAL;
			}
		}

		headers->cvlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
		struct flow_match_pppoe match;

		flow_rule_match_pppoe(rule, &match);
		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);

		/* If ethertype equals ETH_P_PPP_SES, n_proto might be
		 * overwritten by encapsulated protocol (ppp_proto field) or set
		 * to 0. To correct this, flow_match_pppoe provides the type
		 * field, which contains the actual ethertype (ETH_P_PPP_SES).
		 */
		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ice_tc_set_tos_ttl(&match, fltr, headers, false);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

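/* Illustrative only: directing traffic to an ADQ traffic class typically
 * uses a command along the lines of
 *
 *   tc filter add dev $PF_DEV protocol ip ingress flower \
 *       dst_ip 192.168.1.1 ip_proto tcp dst_port 80 \
 *       skip_sw hw_tc 1
 *
 * which reaches the handler below with cls_flower->classid set.
 */
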
/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include the dest MAC address; otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied (such
	 * as dest IP address and dest/src L4 port). The code below handles:
	 * 1. For non-tunnel, if the user specified MAC addresses, use them
	 *    (meaning this code does nothing).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
	 *    implicit dest MAC set to the lower netdev's active unicast MAC
	 *    address.
	 * 3. For tunnel, as of now the TC filter through the flower
	 *    classifier has no provision for the user to specify the outer
	 *    DMAC, hence the driver implicitly adds the outer dest MAC set to
	 *    the lower netdev's active unicast MAC address.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
	}

	/* validate the specified dest MAC address: make sure it either
	 * belongs to the lower netdev or to one of its MACVLANs. MACVLAN MAC
	 * addresses are added as unicast MAC filters destined to the main
	 * VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
	 * add a VLAN based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all
		 * ice devices get an instance of the rule from the
		 * higher-level device. Avoid triggering an explicit error in
		 * this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries, if exists - return error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}