xref: /openbmc/linux/drivers/net/ethernet/intel/ice/ice_tc_lib.c (revision eccd0a80dc7f4be65430236db475546b0ab9ec37)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
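 *
 * Return: the number of advanced lookup elements needed; callers expect this
 * count to match the number of elements later filled by ice_tc_fill_rules().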
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4))
		lkups_cnt++;
	else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			  ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* are L4 (TCP/UDP or other L4 protocol) port fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
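 *
 * Return: the number of elements populated in @list.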
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ICE_ETYPE_OL;
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ICE_MAC_OFOS;
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ICE_IPV4_OFOS;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ICE_IPV6_OFOS;
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;
		if (headers->l3_key.ip_proto == IPPROTO_TCP) {
			list[i].type = ICE_TCP_IL;
			/* detected L4 proto is TCP */
			if (l4_proto)
				*l4_proto = IPPROTO_TCP;
		} else if (headers->l3_key.ip_proto == IPPROTO_UDP) {
			list[i].type = ICE_UDP_ILOS;
			/* detected L4 proto is UDP */
			if (l4_proto)
				*l4_proto = IPPROTO_UDP;
		}
		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}
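/**
 * ice_eswitch_tc_parse_action - parse TC filter action in switchdev mode
 * @fltr: Pointer to TC flower filter structure
 * @act: Pointer to flow action entry to translate
 *
 * Translate a TC flower action into a filter action and destination for
 * switchdev mode. Only DROP and REDIRECT (to a port representor or an ice
 * netdev) are supported.
 */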
static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev)) {
			struct ice_netdev_priv *np = netdev_priv(act->dev);

			fltr->dest_vsi = np->vsi;
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}
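/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Build the advanced lookup list from the parsed TC flower fields and
 * program the resulting rule into the HW switch, storing the rule IDs in
 * @fltr for later removal.
 */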
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	enum ice_status status;
	int lkups_cnt;
	int ret = 0;
	int i;

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe that gets created as a result of the new
	 * extraction sequence based on the input set.
	 * Priority '7' is the max value for a switch recipe; the higher
	 * the number, the earlier the rule is evaluated.
	 */
	rule_info.priority = 7;

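	/* the filter direction selects the Rx/Tx flag and the rule source;
	 * egress rules additionally set the LAN-enable action flag
	 */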
	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (status) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		ret = -EIO;
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * the advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the filter parameters, using the advanced
 * recipes supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		ret = -EIO;
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * the advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, the VSI type is ICE_VSI_CHNL, but for a PF ADQ
		 * filter the destination VSI is not yet set in tc_fltr,
		 * hence store the dest_vsi pointer in tc_fltr
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is VSI handle (sw handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match->key->dst) {
		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPv6 addresses should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* if both src and dest IPv6 addresses are wildcards, error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst))
		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	if (!ipv6_addr_any(&match->mask->src))
		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match.key->dst) {
		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
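 *
 * As an illustrative (hypothetical) example, a rule such as:
 *   tc filter add dev ethX ingress protocol ip flower \
 *     dst_mac 00:11:22:33:44:55 dst_ip 192.168.10.1 ip_proto tcp dst_port 80
 * would set ICE_TC_FLWR_FIELD_DST_MAC, ICE_TC_FLWR_FIELD_DEST_IPV4 and
 * ICE_TC_FLWR_FIELD_DEST_L4_PORT in fltr->flags via the parsing below.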
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transports are supported");
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of nonexistent destination");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because combining a tunnel key with an inner MAC match is an unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include a dest MAC address, otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied, such
	 * as dest IP address and dest/src L4 port. The following code handles
	 * two cases:
	 * 1. For non-tunnel, if the user specified MAC addresses, use them
	 *    (this code then does nothing).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
	 *    implicit dest MAC set to the lower netdev's active unicast MAC
	 *    address.
	 */
	if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				main_vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
	}

	/* validate the specified dest MAC address; make sure it either belongs
	 * to the lower netdev or to a MACVLAN. MACVLAN MAC addresses are added
	 * as unicast MAC filters destined to the main VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
	 * add a VLAN based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from the HW table and manages book-keeping
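 *
 * Return: 0 on success, -ENOENT if the rule no longer exists in HW, -EIO on
 * any other removal failure.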
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == ICE_ERR_DOES_NOT_EXIST) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses the action,
 * and adds a filter.
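 *
 * Return: 0 on success, with the newly created filter returned through
 * @__fltr; a negative error code otherwise.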
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all ice
		 * devices get an instance of the rule from the higher-level
		 * device. Avoid triggering an explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one already exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because it could not be found");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}
1057