1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_tc_lib.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 
9 /**
10  * ice_tc_count_lkups - determine lookup count for switch filter
11  * @flags: TC-flower flags
12  * @headers: Pointer to TC flower filter header structure
13  * @fltr: Pointer to outer TC filter structure
14  *
15  * Determine lookup count based on TC flower input for switch filter.
16  */
17 static int
18 ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
19 		   struct ice_tc_flower_fltr *fltr)
20 {
21 	int lkups_cnt = 0;
22 
23 	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
24 		lkups_cnt++;
25 
26 	/* are MAC fields specified? */
27 	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
28 		lkups_cnt++;
29 
30 	/* is VLAN specified? */
31 	if (flags & ICE_TC_FLWR_FIELD_VLAN)
32 		lkups_cnt++;
33 
34 	/* are IPv[4|6] fields specified? */
35 	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4))
36 		lkups_cnt++;
37 	else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
38 			  ICE_TC_FLWR_FIELD_SRC_IPV6))
39 		lkups_cnt++;
40 
41 	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
42 	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
43 		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
44 		lkups_cnt++;
45 
46 	return lkups_cnt;
47 }
48 
/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure (currently unused here)
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advance rule elements
 * @rule_info: pointer to information about rule (currently unused here)
 * @l4_proto: pointer to information such as L4 proto type; may be NULL
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add
 * advance filter in hardware.
 *
 * NOTE: the number and order of elements written here must stay in
 * lockstep with ice_tc_count_lkups(), which sizes @list; the caller
 * verifies the returned count against it.
 *
 * Return: number of lookup elements written into @list.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	int i = 0;

	/* copy EtherType key/mask when an explicit protocol was matched */
	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ICE_ETYPE_OL;
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	/* copy outer MAC addresses; dst and src share one lookup element */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ICE_MAC_OFOS;
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info; ID is always matched with a full 16-bit mask
	 * (the parse step rejected partial VLAN ID masks)
	 */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ICE_IPV4_OFOS;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ICE_IPV6_OFOS;
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port; the element type depends on the
	 * ip_proto captured during parsing (TCP vs UDP)
	 */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;
		if (headers->l3_key.ip_proto == IPPROTO_TCP) {
			list[i].type = ICE_TCP_IL;
			/* detected L4 proto is TCP */
			if (l4_proto)
				*l4_proto = IPPROTO_TCP;
		} else if (headers->l3_key.ip_proto == IPPROTO_UDP) {
			list[i].type = ICE_UDP_ILOS;
			/* detected L4 proto is UDP */
			if (l4_proto)
				*l4_proto = IPPROTO_UDP;
		}
		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}
184 
185 static int
186 ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
187 			    struct flow_action_entry *act)
188 {
189 	struct ice_repr *repr;
190 
191 	switch (act->id) {
192 	case FLOW_ACTION_DROP:
193 		fltr->action.fltr_act = ICE_DROP_PACKET;
194 		break;
195 
196 	case FLOW_ACTION_REDIRECT:
197 		fltr->action.fltr_act = ICE_FWD_TO_VSI;
198 
199 		if (ice_is_port_repr_netdev(act->dev)) {
200 			repr = ice_netdev_to_repr(act->dev);
201 
202 			fltr->dest_vsi = repr->src_vsi;
203 			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
204 		} else if (netif_is_ice(act->dev)) {
205 			struct ice_netdev_priv *np = netdev_priv(act->dev);
206 
207 			fltr->dest_vsi = np->vsi;
208 			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
209 		} else {
210 			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
211 			return -EINVAL;
212 		}
213 
214 		break;
215 
216 	default:
217 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
218 		return -EINVAL;
219 	}
220 
221 	return 0;
222 }
223 
224 static int
225 ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
226 {
227 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
228 	struct ice_adv_rule_info rule_info = { 0 };
229 	struct ice_rule_query_data rule_added;
230 	struct ice_hw *hw = &vsi->back->hw;
231 	struct ice_adv_lkup_elem *list;
232 	u32 flags = fltr->flags;
233 	enum ice_status status;
234 	int lkups_cnt;
235 	int ret = 0;
236 	int i;
237 
238 	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
239 				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
240 				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
241 				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
242 				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
243 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
244 		return -EOPNOTSUPP;
245 	}
246 
247 	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
248 	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
249 	if (!list)
250 		return -ENOMEM;
251 
252 	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
253 	if (i != lkups_cnt) {
254 		ret = -EINVAL;
255 		goto exit;
256 	}
257 
258 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
259 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
260 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
261 	/* For now, making priority to be highest, and it also becomes
262 	 * the priority for recipe which will get created as a result of
263 	 * new extraction sequence based on input set.
264 	 * Priority '7' is max val for switch recipe, higher the number
265 	 * results into order of switch rule evaluation.
266 	 */
267 	rule_info.priority = 7;
268 
269 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
270 		rule_info.sw_act.flag |= ICE_FLTR_RX;
271 		rule_info.sw_act.src = hw->pf_id;
272 		rule_info.rx = true;
273 	} else {
274 		rule_info.sw_act.flag |= ICE_FLTR_TX;
275 		rule_info.sw_act.src = vsi->idx;
276 		rule_info.rx = false;
277 	}
278 
279 	/* specify the cookie as filter_rule_id */
280 	rule_info.fltr_rule_id = fltr->cookie;
281 
282 	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
283 	if (status == ICE_ERR_ALREADY_EXISTS) {
284 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
285 		ret = -EINVAL;
286 		goto exit;
287 	} else if (status) {
288 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
289 		ret = -EIO;
290 		goto exit;
291 	}
292 
293 	/* store the output params, which are needed later for removing
294 	 * advanced switch filter
295 	 */
296 	fltr->rid = rule_added.rid;
297 	fltr->rule_id = rule_added.rule_id;
298 
299 	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
300 		if (ice_fltr_update_flags(vsi, fltr->rule_id, fltr->rid,
301 					  ICE_SINGLE_ACT_LAN_ENABLE))
302 			ice_rem_adv_rule_by_id(hw, &rule_added);
303 	}
304 
305 exit:
306 	kfree(list);
307 	return ret;
308 }
309 
310 /**
311  * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
312  * @match: Pointer to flow match structure
313  * @fltr: Pointer to filter structure
314  * @headers: inner or outer header fields
315  */
316 static int
317 ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
318 		struct ice_tc_flower_fltr *fltr,
319 		struct ice_tc_flower_lyr_2_4_hdrs *headers)
320 {
321 	if (match->key->dst) {
322 		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
323 		headers->l3_key.dst_ipv4 = match->key->dst;
324 		headers->l3_mask.dst_ipv4 = match->mask->dst;
325 	}
326 	if (match->key->src) {
327 		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
328 		headers->l3_key.src_ipv4 = match->key->src;
329 		headers->l3_mask.src_ipv4 = match->mask->src;
330 	}
331 	return 0;
332 }
333 
334 /**
335  * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
336  * @match: Pointer to flow match structure
337  * @fltr: Pointer to filter structure
338  * @headers: inner or outer header fields
339  */
340 static int
341 ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
342 		struct ice_tc_flower_fltr *fltr,
343 		struct ice_tc_flower_lyr_2_4_hdrs *headers)
344 {
345 	struct ice_tc_l3_hdr *l3_key, *l3_mask;
346 
347 	/* src and dest IPV6 address should not be LOOPBACK
348 	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
349 	 */
350 	if (ipv6_addr_loopback(&match->key->dst) ||
351 	    ipv6_addr_loopback(&match->key->src)) {
352 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
353 		return -EINVAL;
354 	}
355 	/* if src/dest IPv6 address is *,* error */
356 	if (ipv6_addr_any(&match->mask->dst) &&
357 	    ipv6_addr_any(&match->mask->src)) {
358 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
359 		return -EINVAL;
360 	}
361 	if (!ipv6_addr_any(&match->mask->dst))
362 		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
363 	if (!ipv6_addr_any(&match->mask->src))
364 		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
365 
366 	l3_key = &headers->l3_key;
367 	l3_mask = &headers->l3_mask;
368 
369 	if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
370 		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
371 		       sizeof(match->key->src.s6_addr));
372 		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
373 		       sizeof(match->mask->src.s6_addr));
374 	}
375 	if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
376 		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
377 		       sizeof(match->key->dst.s6_addr));
378 		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
379 		       sizeof(match->mask->dst.s6_addr));
380 	}
381 
382 	return 0;
383 }
384 
385 /**
386  * ice_tc_set_port - Parse ports from TC flower filter
387  * @match: Flow match structure
388  * @fltr: Pointer to filter structure
389  * @headers: inner or outer header fields
390  */
391 static int
392 ice_tc_set_port(struct flow_match_ports match,
393 		struct ice_tc_flower_fltr *fltr,
394 		struct ice_tc_flower_lyr_2_4_hdrs *headers)
395 {
396 	if (match.key->dst) {
397 		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
398 		headers->l4_key.dst_port = match.key->dst;
399 		headers->l4_mask.dst_port = match.mask->dst;
400 	}
401 	if (match.key->src) {
402 		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
403 		headers->l4_key.src_port = match.key->src;
404 		headers->l4_mask.src_port = match.mask->src;
405 	}
406 	return 0;
407 }
408 
/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 *
 * Translate the flow dissector match keys of @f into driver-internal
 * header key/mask fields and ICE_TC_FLWR_FIELD_* flags on @fltr.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported dissector keys,
 * -EINVAL for invalid match values.
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;

	dissector = rule->match.dissector;

	/* reject filters using any dissector key we do not handle below */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		/* ETH_P_ALL (or zero) means "any protocol": don't match
		 * on EtherType at all
		 */
		if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		/* ip_proto is consumed later by ice_tc_fill_rules() to
		 * pick the TCP vs UDP lookup element type
		 */
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			/* filter was added on a VLAN upper device: derive
			 * an exact VLAN ID match from the device itself
			 */
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		/* only a full VLAN ID match is supported; partial masks
		 * cannot be programmed
		 */
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}
557 
558 /**
559  * ice_add_switch_fltr - Add TC flower filters
560  * @vsi: Pointer to VSI
561  * @fltr: Pointer to struct ice_tc_flower_fltr
562  *
563  * Add filter in HW switch block
564  */
565 static int
566 ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
567 {
568 	if (ice_is_eswitch_mode_switchdev(vsi->back))
569 		return ice_eswitch_add_tc_fltr(vsi, fltr);
570 
571 	return -EOPNOTSUPP;
572 }
573 
574 /**
575  * ice_handle_tclass_action - Support directing to a traffic class
576  * @vsi: Pointer to VSI
577  * @cls_flower: Pointer to TC flower offload structure
578  * @fltr: Pointer to TC flower filter structure
579  *
580  * Support directing traffic to a traffic class
581  */
582 static int
583 ice_handle_tclass_action(struct ice_vsi *vsi,
584 			 struct flow_cls_offload *cls_flower,
585 			 struct ice_tc_flower_fltr *fltr)
586 {
587 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
588 
589 	if (tc < 0) {
590 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
591 		return -EINVAL;
592 	}
593 	if (!tc) {
594 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
595 		return -EINVAL;
596 	}
597 
598 	if (!(vsi->tc_cfg.ena_tc & BIT(tc))) {
599 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination");
600 		return -EINVAL;
601 	}
602 
603 	/* Redirect to a TC class or Queue Group */
604 	fltr->action.fltr_act = ICE_FWD_TO_QGRP;
605 	fltr->action.tc_class = tc;
606 
607 	return 0;
608 }
609 
610 /**
611  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
612  * @vsi: Pointer to VSI
613  * @cls_flower: Pointer to TC flower offload structure
614  * @fltr: Pointer to TC flower filter structure
615  *
616  * Parse the actions for a TC filter
617  */
618 static int
619 ice_parse_tc_flower_actions(struct ice_vsi *vsi,
620 			    struct flow_cls_offload *cls_flower,
621 			    struct ice_tc_flower_fltr *fltr)
622 {
623 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
624 	struct flow_action *flow_action = &rule->action;
625 	struct flow_action_entry *act;
626 	int i;
627 
628 	if (cls_flower->classid)
629 		return ice_handle_tclass_action(vsi, cls_flower, fltr);
630 
631 	if (!flow_action_has_entries(flow_action))
632 		return -EINVAL;
633 
634 	flow_action_for_each(i, act, flow_action) {
635 		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
636 			int err = ice_eswitch_tc_parse_action(fltr, act);
637 
638 			if (err)
639 				return err;
640 			continue;
641 		}
642 		/* Allow only one rule per filter */
643 
644 		/* Drop action */
645 		if (act->id == FLOW_ACTION_DROP) {
646 			fltr->action.fltr_act = ICE_DROP_PACKET;
647 			return 0;
648 		}
649 		fltr->action.fltr_act = ICE_FWD_TO_VSI;
650 	}
651 	return 0;
652 }
653 
654 /**
655  * ice_del_tc_fltr - deletes a filter from HW table
656  * @vsi: Pointer to VSI
657  * @fltr: Pointer to struct ice_tc_flower_fltr
658  *
659  * This function deletes a filter from HW table and manages book-keeping
660  */
661 static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
662 {
663 	struct ice_rule_query_data rule_rem;
664 	struct ice_pf *pf = vsi->back;
665 	int err;
666 
667 	rule_rem.rid = fltr->rid;
668 	rule_rem.rule_id = fltr->rule_id;
669 	rule_rem.vsi_handle = fltr->dest_id;
670 	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
671 	if (err) {
672 		if (err == ICE_ERR_DOES_NOT_EXIST) {
673 			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
674 			return -ENOENT;
675 		}
676 		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
677 		return -EIO;
678 	}
679 
680 	return 0;
681 }
682 
683 /**
684  * ice_add_tc_fltr - adds a TC flower filter
685  * @netdev: Pointer to netdev
686  * @vsi: Pointer to VSI
687  * @f: Pointer to flower offload structure
688  * @__fltr: Pointer to struct ice_tc_flower_fltr
689  *
690  * This function parses TC-flower input fields, parses action,
691  * and adds a filter.
692  */
693 static int
694 ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
695 		struct flow_cls_offload *f,
696 		struct ice_tc_flower_fltr **__fltr)
697 {
698 	struct ice_tc_flower_fltr *fltr;
699 	int err;
700 
701 	/* by default, set output to be INVALID */
702 	*__fltr = NULL;
703 
704 	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
705 	if (!fltr)
706 		return -ENOMEM;
707 
708 	fltr->cookie = f->cookie;
709 	fltr->extack = f->common.extack;
710 	fltr->src_vsi = vsi;
711 	INIT_HLIST_NODE(&fltr->tc_flower_node);
712 
713 	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
714 	if (err < 0)
715 		goto err;
716 
717 	err = ice_parse_tc_flower_actions(vsi, f, fltr);
718 	if (err < 0)
719 		goto err;
720 
721 	err = ice_add_switch_fltr(vsi, fltr);
722 	if (err < 0)
723 		goto err;
724 
725 	/* return the newly created filter */
726 	*__fltr = fltr;
727 
728 	return 0;
729 err:
730 	kfree(fltr);
731 	return err;
732 }
733 
734 /**
735  * ice_find_tc_flower_fltr - Find the TC flower filter in the list
736  * @pf: Pointer to PF
737  * @cookie: filter specific cookie
738  */
739 static struct ice_tc_flower_fltr *
740 ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
741 {
742 	struct ice_tc_flower_fltr *fltr;
743 
744 	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
745 		if (cookie == fltr->cookie)
746 			return fltr;
747 
748 	return NULL;
749 }
750 
751 /**
752  * ice_add_cls_flower - add TC flower filters
753  * @netdev: Pointer to filter device
754  * @vsi: Pointer to VSI
755  * @cls_flower: Pointer to flower offload structure
756  */
757 int
758 ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
759 		   struct flow_cls_offload *cls_flower)
760 {
761 	struct netlink_ext_ack *extack = cls_flower->common.extack;
762 	struct net_device *vsi_netdev = vsi->netdev;
763 	struct ice_tc_flower_fltr *fltr;
764 	struct ice_pf *pf = vsi->back;
765 	int err;
766 
767 	if (ice_is_reset_in_progress(pf->state))
768 		return -EBUSY;
769 	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
770 		return -EINVAL;
771 
772 	if (ice_is_port_repr_netdev(netdev))
773 		vsi_netdev = netdev;
774 
775 	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
776 	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
777 		/* Based on TC indirect notifications from kernel, all ice
778 		 * devices get an instance of rule from higher level device.
779 		 * Avoid triggering explicit error in this case.
780 		 */
781 		if (netdev == vsi_netdev)
782 			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
783 		return -EINVAL;
784 	}
785 
786 	/* avoid duplicate entries, if exists - return error */
787 	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
788 	if (fltr) {
789 		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
790 		return -EEXIST;
791 	}
792 
793 	/* prep and add TC-flower filter in HW */
794 	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
795 	if (err)
796 		return err;
797 
798 	/* add filter into an ordered list */
799 	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
800 	return 0;
801 }
802 
803 /**
804  * ice_del_cls_flower - delete TC flower filters
805  * @vsi: Pointer to VSI
806  * @cls_flower: Pointer to struct flow_cls_offload
807  */
808 int
809 ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
810 {
811 	struct ice_tc_flower_fltr *fltr;
812 	struct ice_pf *pf = vsi->back;
813 	int err;
814 
815 	/* find filter */
816 	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
817 	if (!fltr) {
818 		if (hlist_empty(&pf->tc_flower_fltr_list))
819 			return 0;
820 
821 		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
822 		return -EINVAL;
823 	}
824 
825 	fltr->extack = cls_flower->common.extack;
826 	/* delete filter from HW */
827 	err = ice_del_tc_fltr(vsi, fltr);
828 	if (err)
829 		return err;
830 
831 	/* delete filter from an ordered list */
832 	hlist_del(&fltr->tc_flower_node);
833 
834 	/* free the filter node */
835 	kfree(fltr);
836 
837 	return 0;
838 }
839 
840 /**
841  * ice_replay_tc_fltrs - replay TC filters
842  * @pf: pointer to PF struct
843  */
844 void ice_replay_tc_fltrs(struct ice_pf *pf)
845 {
846 	struct ice_tc_flower_fltr *fltr;
847 	struct hlist_node *node;
848 
849 	hlist_for_each_entry_safe(fltr, node,
850 				  &pf->tc_flower_fltr_list,
851 				  tc_flower_node) {
852 		fltr->extack = NULL;
853 		ice_add_switch_fltr(fltr->src_vsi, fltr);
854 	}
855 }
856