// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	/* currently inner etype filter isn't supported */
	if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
	    fltr->tunnel_type == TNL_LAST)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}
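
/* Worked example: a filter matching dst MAC, dst IPv4 address and dst TCP
 * port (flags = ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_DEST_IPV4 |
 * ICE_TC_FLWR_FIELD_DEST_L4_PORT) yields lkups_cnt = 3: one lookup element
 * each for the MAC, IPv4 and L4 port headers.
 */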

static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	default:
		return ICE_NON_TUN;
	}
}

static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
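			/* The 24-bit VNI sits in the upper three bytes of the
			 * 32-bit VNI field of the VXLAN/GENEVE header, hence
			 * the shift by 8 and the 0xffffff00 byte mask below.
			 */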
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	int i = 0;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	} else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ICE_ETYPE_OL;
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects the appropriate tunnel type if the specified device
 * is a tunnel device such as VXLAN/GENEVE/GRETAP.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;
	return TNL_LAST;
}
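
/* For illustration only (device names are assumptions, not driver API): the
 * helper above reports TNL_VXLAN for a netdev created with, e.g.:
 *
 *   ip link add vxlan100 type vxlan id 100 dstport 4789 \
 *       local 10.0.0.1 remote 10.0.0.2
 *
 * and TNL_GRETAP for a gretap/ip6gretap device.
 */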

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}

static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	enum ice_status status;
	int lkups_cnt;
	int ret = 0;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}
	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, use the highest priority; it also becomes the priority of
	 * the recipe that gets created as a result of the new extraction
	 * sequence based on the input set.
	 * Priority 7 is the maximum value for a switch recipe; the higher
	 * the number, the earlier the rule is evaluated by the switch.
	 */
	rule_info.priority = 7;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (status) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		ret = -EIO;
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;

exit:
	kfree(list);
	return ret;
}
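
/* Sketch of how this path is exercised from user space (illustrative
 * command only; "eth0_rep" and "eth0" are assumed representor/uplink names,
 * not driver API):
 *
 *   tc filter add dev eth0_rep ingress protocol ip flower skip_sw \
 *       dst_ip 192.168.0.2 action mirred egress redirect dev eth0
 *
 * The FLOW_ACTION_REDIRECT entry parsed by ice_eswitch_tc_parse_action()
 * picks the destination VSI, and this function programs the rule through
 * ice_add_adv_rule().
 */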

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add the appropriate filter rules, based on the filter parameters, using
 * advanced recipes supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	enum ice_status status;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (status) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		ret = -EIO;
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, the VSI type is ICE_VSI_CHNL, but for a PF ADQ
		 * filter dest_vsi is not yet set in tc_fltr, so store the
		 * dest_vsi pointer in tc_fltr here.
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filters for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is the VSI handle (SW handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keep track of channel filters for the PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}
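
/* Sketch of an ADQ filter that reaches this function (illustrative command
 * only, assuming hw-tc-offload is enabled and TCs were created via
 * tc mqprio):
 *
 *   tc filter add dev eth0 protocol ip ingress prio 1 flower \
 *       dst_ip 192.168.1.10/32 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * The hw_tc argument ends up as tc_fltr->action.tc_class, which selects the
 * channel VSI (ch_vsi) the rule forwards to.
 */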

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPv6 addresses should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* if both src and dest IPv6 addresses are unspecified (any), error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		headers->l3_key.tos = match.key->tos;
		headers->l3_key.ttl = match.key->ttl;
		headers->l3_mask.tos = match.mask->tos;
		headers->l3_mask.ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, true))
			return -EINVAL;
	}

	return 0;
}
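
/* Sketch of a tunnel match that exercises this parser (illustrative command
 * only; device names are assumptions):
 *
 *   tc filter add dev vxlan100 ingress protocol ip flower skip_sw \
 *       enc_key_id 100 enc_dst_ip 10.0.0.1 \
 *       action mirred egress redirect dev eth0_rep
 *
 * enc_key_id fills fltr->tenant_id (the mask must be the full 32 bits) and
 * enc_dst_ip fills the outer l3 key/mask; for VXLAN/GENEVE the outer UDP
 * destination port is deliberately not parsed (see the TNL_VXLAN/TNL_GENEVE
 * exclusion above).
 */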

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; the
		 * outer headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}
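
/* Example of a plain (non-tunnel) match handled by the parser above
 * (illustrative command only):
 *
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *       src_mac 00:11:22:33:44:55 ip_proto udp src_port 5000
 *
 * Among others, this sets ICE_TC_FLWR_FIELD_SRC_MAC and
 * ICE_TC_FLWR_FIELD_SRC_L4_PORT and fills the corresponding l2/l4 key and
 * mask fields.
 */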

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because the specified destination doesn't exist");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because combining a tunnel key with an inner MAC match is unsupported");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include the dest MAC address; otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied (such
	 * as dest IP address and dest/src L4 port). The following code
	 * handles two cases:
	 * 1. For non-tunnel, if the user specified MAC addresses, use them
	 *    (meaning this code does nothing).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
	 *    implicit dest MAC: the lower netdev's active unicast MAC
	 *    address.
	 */
	if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				main_vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
	}

	/* validate the specified dest MAC address; make sure it belongs
	 * either to the lower netdev or to one of the MACVLANs. MACVLAN MAC
	 * addresses are added as unicast MAC filters destined to the main
	 * VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
	 * add a VLAN based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	enum ice_status err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == ICE_ERR_DOES_NOT_EXIST) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all ice
		 * devices get an instance of the rule from the higher-level
		 * device. Avoid triggering an explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one already exists, return error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}