// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	/* currently inner etype filter isn't supported */
	if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
	    fltr->tunnel_type == TNL_LAST)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}

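/* Helpers mapping TC flower matches to ice protocol lookup types. The switch
 * block distinguishes outer (*_OFOS) from inner (*_IL) header lookups; for
 * tunnel filters the inner variants describe the encapsulated packet.
 */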
static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

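/* Select the L4 port lookup type from the IP protocol. Only UDP is handled
 * for inner ports here; an unhandled combination returns 0 (an invalid
 * protocol type), which is expected to be rejected when the rule is added.
 */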
static enum ice_protocol_type
ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
{
	if (inner) {
		switch (ip_proto) {
		case IPPROTO_UDP:
			return ICE_UDP_ILOS;
		}
	} else {
		switch (ip_proto) {
		case IPPROTO_TCP:
			return ICE_TCP_IL;
		case IPPROTO_UDP:
			return ICE_UDP_OF;
		}
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	default:
		return ICE_NON_TUN;
	}
}

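/**
 * ice_tc_fill_tunnel_outer - fill tunnel outer header lookups
 * @flags: TC flower field flags
 * @fltr: pointer to TC flower filter
 * @list: list of advanced rule elements to fill
 *
 * Fill the lookup elements matching the tunnel (outer) headers: tunnel ID,
 * outer IP addresses, and outer destination L4 port. Returns the number of
 * lookup elements filled.
 */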
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
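			/* The 24-bit VNI occupies the upper three bytes of
			 * the 32-bit VNI word in the header, hence the shift
			 * by 8 and the 0xffffff00 mask below.
			 */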
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
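			/* For NVGRE, the GRE key is matched as the full
			 * 32-bit TNI/flow-ID word as provided, hence the
			 * all-ones mask.
			 */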
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
		list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: TC flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	int i = 0;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	} else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ICE_ETYPE_OL;
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

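	/* At this point, for tunnel filters, 'headers' points at the inner
	 * headers and 'inner' is true, so the lookups below match the
	 * encapsulated packet; otherwise they match the only header set.
	 */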
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects the appropriate tunnel_type if the specified device
 * is a tunnel device such as VXLAN/Geneve/GRETAP.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}

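/**
 * ice_eswitch_tc_parse_action - parse a TC action for switchdev mode
 * @fltr: pointer to TC flower filter
 * @act: flow action entry to parse
 *
 * Translate a drop or redirect action into an ice filter action and, for
 * redirects, derive the filter direction from the destination netdevice.
 */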
static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

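/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: pointer to source VSI
 * @fltr: pointer to TC flower filter
 *
 * Build the lookup list for the filter and add the resulting advanced switch
 * rule, directing traffic between the uplink and the port representors.
 */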
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	enum ice_status status;
	int lkups_cnt;
	int ret = 0;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe that gets created as a result of the new
	 * extraction sequence based on the input set. Priority '7' is the
	 * max value for a switch recipe; a higher number gives the rule
	 * higher precedence in switch rule evaluation.
	 */
	rule_info.priority = 7;

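	/* Ingress rules match traffic received on the PF port, hence
	 * src = pf_id; egress rules originate from the source VSI.
	 * LAN_ENABLE is presumably needed so the redirected packet is
	 * actually transmitted on the wire rather than only switched
	 * internally.
	 */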
	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (status) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		ret = -EIO;
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the filter parameters, using advanced recipes
 * supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	enum ice_status status;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (status) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		ret = -EIO;
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
		 * for PF ADQ filter, it is not yet set in tc_fltr,
		 * hence store the dest_vsi ptr in tc_fltr
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is VSI handle (sw handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}

610 
611 /**
612  * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
613  * @match: Pointer to flow match structure
614  * @fltr: Pointer to filter structure
615  * @headers: inner or outer header fields
616  * @is_encap: set true for tunnel IPv4 address
617  */
618 static int
619 ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
620 		struct ice_tc_flower_fltr *fltr,
621 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
622 {
623 	if (match->key->dst) {
624 		if (is_encap)
625 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
626 		else
627 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
628 		headers->l3_key.dst_ipv4 = match->key->dst;
629 		headers->l3_mask.dst_ipv4 = match->mask->dst;
630 	}
631 	if (match->key->src) {
632 		if (is_encap)
633 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
634 		else
635 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
636 		headers->l3_key.src_ipv4 = match->key->src;
637 		headers->l3_mask.src_ipv4 = match->mask->src;
638 	}
639 	return 0;
640 }
641 
642 /**
643  * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
644  * @match: Pointer to flow match structure
645  * @fltr: Pointer to filter structure
646  * @headers: inner or outer header fields
647  * @is_encap: set true for tunnel IPv6 address
648  */
649 static int
650 ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
651 		struct ice_tc_flower_fltr *fltr,
652 		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
653 {
654 	struct ice_tc_l3_hdr *l3_key, *l3_mask;
655 
656 	/* src and dest IPV6 address should not be LOOPBACK
657 	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
658 	 */
659 	if (ipv6_addr_loopback(&match->key->dst) ||
660 	    ipv6_addr_loopback(&match->key->src)) {
661 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
662 		return -EINVAL;
663 	}
	/* if both src and dest IPv6 addresses are wildcard (any), error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

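/**
 * ice_get_tunnel_device - find the tunnel device referenced by a rule
 * @dev: device the filter was requested on
 * @rule: flow rule to scan for redirect actions
 *
 * Return @dev itself if it is a supported tunnel device, otherwise the first
 * supported tunnel device targeted by a redirect action, or NULL if none.
 */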
static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

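/**
 * ice_parse_tunnel_attr - parse tunnel (encap) attributes of a flower rule
 * @dev: tunnel device the filter applies to
 * @rule: flow rule provided by the kernel
 * @fltr: pointer to TC flower filter
 *
 * Record the tunnel type and fill the outer header fields (tunnel key ID,
 * outer IP addresses, ToS/TTL, and outer destination port) in @fltr.
 */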
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		headers->l3_key.tos = match.key->tos;
		headers->l3_key.ttl = match.key->ttl;
		headers->l3_mask.tos = match.mask->tos;
		headers->l3_mask.ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, true))
			return -EINVAL;
	}

	return 0;
}
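
/* Illustrative (hypothetical) usage: the kind of rule the parsing below is
 * meant to handle can be created from user space with tc, e.g.:
 *
 *   tc filter add dev $UPLINK ingress protocol ip flower \
 *       enc_key_id 42 enc_dst_ip 192.168.0.1 enc_dst_port 4789 \
 *       dst_ip 10.0.0.2 ip_proto udp dst_port 80 \
 *       action mirred egress redirect dev $VF_REPR
 *
 * Device names are placeholders; the exact flower options available depend
 * on the iproute2 version.
 */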

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; outer
		 * headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

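		/* When the filter is added on a VLAN upper device, there is
		 * no VLAN dissector key; synthesize an exact match on the
		 * device's own VLAN ID (with priority ignored) instead.
		 */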
		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport protocols are supported");
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include a dest MAC address, otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied, such
	 * as dest IP address and dest/src L4 port. The following code handles:
	 * 1. For non-tunnel, if the user specified MAC addresses, use them
	 *    (meaning this code won't do anything).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
	 *    implicit dest MAC equal to the lower netdev's active unicast MAC
	 *    address.
	 */
	if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				main_vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
	}

	/* validate the specified dest MAC address; make sure it belongs
	 * either to the lower netdev or to one of its MACVLANs. MACVLAN MAC
	 * addresses are added as unicast MAC filters destined to the main
	 * VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure the VLAN is already added to the main VSI before allowing
	 * ADQ to add a VLAN-based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	enum ice_status err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == ICE_ERR_DOES_NOT_EXIST) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from kernel, all ice
		 * devices get an instance of rule from higher level device.
		 * Avoid triggering explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one already exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}