1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip VCAP API
3  *
4  * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <net/tc_act/tc_gate.h>
8 #include <net/tcp.h>
9 
10 #include "sparx5_tc.h"
11 #include "vcap_api.h"
12 #include "vcap_api_client.h"
13 #include "sparx5_main.h"
14 #include "sparx5_vcap_impl.h"
15 
16 #define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */
17 
/* Collect keysets and type ids for multiple rules per size
 * (one entry per rule size / subword count, see SPX5_MAX_RULE_SIZE)
 */
struct sparx5_wildcard_rule {
	bool selected;                 /* entry is in use */
	u8 value;                      /* combined keyset type id value */
	u8 mask;                       /* combined keyset type id mask bits */
	enum vcap_keyfield_set keyset; /* first keyset matching this size */
};
25 
/* One candidate wildcard rule per possible rule size */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
29 
/* State shared between the per-dissector-key usage handlers */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco; /* classifier offload request (extack) */
	struct flow_rule *frule;      /* flow rule being parsed */
	struct vcap_rule *vrule;      /* VCAP rule being built */
	struct vcap_admin *admin;     /* VCAP instance the rule targets */
	u16 l3_proto;                 /* ethertype from the basic key */
	u8 l4_proto;                  /* IP protocol from the basic key */
	unsigned int used_keys;       /* dissector keys handled so far */
};
39 
/* Hardware encoding of the 2-bit IS2 ARP_OPCODE key field */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};
46 
/* ARP opcode values as delivered by the tc flower arp key */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};
52 
53 static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
54 {
55 	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
56 	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
57 	struct flow_match_eth_addrs match;
58 	struct vcap_u48_key smac, dmac;
59 	int err = 0;
60 
61 	flow_rule_match_eth_addrs(st->frule, &match);
62 
63 	if (!is_zero_ether_addr(match.mask->src)) {
64 		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
65 		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
66 		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
67 		if (err)
68 			goto out;
69 	}
70 
71 	if (!is_zero_ether_addr(match.mask->dst)) {
72 		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
73 		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
74 		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
75 		if (err)
76 			goto out;
77 	}
78 
79 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
80 
81 	return err;
82 
83 out:
84 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
85 	return err;
86 }
87 
88 static int
89 sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
90 {
91 	int err = 0;
92 
93 	if (st->l3_proto == ETH_P_IP) {
94 		struct flow_match_ipv4_addrs mt;
95 
96 		flow_rule_match_ipv4_addrs(st->frule, &mt);
97 		if (mt.mask->src) {
98 			err = vcap_rule_add_key_u32(st->vrule,
99 						    VCAP_KF_L3_IP4_SIP,
100 						    be32_to_cpu(mt.key->src),
101 						    be32_to_cpu(mt.mask->src));
102 			if (err)
103 				goto out;
104 		}
105 		if (mt.mask->dst) {
106 			err = vcap_rule_add_key_u32(st->vrule,
107 						    VCAP_KF_L3_IP4_DIP,
108 						    be32_to_cpu(mt.key->dst),
109 						    be32_to_cpu(mt.mask->dst));
110 			if (err)
111 				goto out;
112 		}
113 	}
114 
115 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
116 
117 	return err;
118 
119 out:
120 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
121 	return err;
122 }
123 
124 static int
125 sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
126 {
127 	int err = 0;
128 
129 	if (st->l3_proto == ETH_P_IPV6) {
130 		struct flow_match_ipv6_addrs mt;
131 		struct vcap_u128_key sip;
132 		struct vcap_u128_key dip;
133 
134 		flow_rule_match_ipv6_addrs(st->frule, &mt);
135 		/* Check if address masks are non-zero */
136 		if (!ipv6_addr_any(&mt.mask->src)) {
137 			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
138 			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
139 			err = vcap_rule_add_key_u128(st->vrule,
140 						     VCAP_KF_L3_IP6_SIP, &sip);
141 			if (err)
142 				goto out;
143 		}
144 		if (!ipv6_addr_any(&mt.mask->dst)) {
145 			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
146 			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
147 			err = vcap_rule_add_key_u128(st->vrule,
148 						     VCAP_KF_L3_IP6_DIP, &dip);
149 			if (err)
150 				goto out;
151 		}
152 	}
153 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
154 	return err;
155 out:
156 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
157 	return err;
158 }
159 
160 static int
161 sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
162 {
163 	struct flow_match_control mt;
164 	u32 value, mask;
165 	int err = 0;
166 
167 	flow_rule_match_control(st->frule, &mt);
168 
169 	if (mt.mask->flags) {
170 		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
171 			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
172 				value = 1; /* initial fragment */
173 				mask = 0x3;
174 			} else {
175 				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
176 					value = 3; /* follow up fragment */
177 					mask = 0x3;
178 				} else {
179 					value = 0; /* no fragment */
180 					mask = 0x3;
181 				}
182 			}
183 		} else {
184 			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
185 				value = 3; /* follow up fragment */
186 				mask = 0x3;
187 			} else {
188 				value = 0; /* no fragment */
189 				mask = 0x3;
190 			}
191 		}
192 
193 		err = vcap_rule_add_key_u32(st->vrule,
194 					    VCAP_KF_L3_FRAGMENT_TYPE,
195 					    value, mask);
196 		if (err)
197 			goto out;
198 	}
199 
200 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
201 
202 	return err;
203 
204 out:
205 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
206 	return err;
207 }
208 
209 static int
210 sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
211 {
212 	struct flow_match_ports mt;
213 	u16 value, mask;
214 	int err = 0;
215 
216 	flow_rule_match_ports(st->frule, &mt);
217 
218 	if (mt.mask->src) {
219 		value = be16_to_cpu(mt.key->src);
220 		mask = be16_to_cpu(mt.mask->src);
221 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
222 					    mask);
223 		if (err)
224 			goto out;
225 	}
226 
227 	if (mt.mask->dst) {
228 		value = be16_to_cpu(mt.key->dst);
229 		mask = be16_to_cpu(mt.mask->dst);
230 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
231 					    mask);
232 		if (err)
233 			goto out;
234 	}
235 
236 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
237 
238 	return err;
239 
240 out:
241 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
242 	return err;
243 }
244 
/* Translate the basic key (ethertype and IP protocol) into VCAP keys and
 * record them in the parse state for use by later handlers.
 */
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			/* Ethertype not classified by this VCAP: match the
			 * raw ETYPE value with a full mask
			 */
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			/* IPv6 is expressed as "IP but not IPv4" */
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				/* IS0 additionally carries an IP/SNAP bit */
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_IP_SNAP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}

		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				/* IS0 distinguishes TCP/UDP from other IP */
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			/* Other protocols are matched on the raw value */
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}
319 
/* Add inner (customer) VLAN tag keys; only the IS0 VCAP supports these */
static int
sparx5_tc_flower_handler_cvlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
	struct flow_match_vlan mt;
	u16 tpid;
	int err;

	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");
		return -EINVAL;
	}

	flow_rule_match_cvlan(st->frule, &mt);

	tpid = be16_to_cpu(mt.key->vlan_tpid);

	/* Select the VID1/PCP1 key pair when the tag protocol is 0x8100;
	 * otherwise the default VID0/PCP0 pair is used
	 */
	if (tpid == ETH_P_8021Q) {
		vid_key = VCAP_KF_8021Q_VID1;
		pcp_key = VCAP_KF_8021Q_PCP1;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
	return err;
}
367 
368 static int
369 sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
370 {
371 	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
372 	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
373 	struct flow_match_vlan mt;
374 	int err;
375 
376 	flow_rule_match_vlan(st->frule, &mt);
377 
378 	if (st->admin->vtype == VCAP_TYPE_IS0) {
379 		vid_key = VCAP_KF_8021Q_VID0;
380 		pcp_key = VCAP_KF_8021Q_PCP0;
381 	}
382 
383 	if (mt.mask->vlan_id) {
384 		err = vcap_rule_add_key_u32(st->vrule, vid_key,
385 					    mt.key->vlan_id,
386 					    mt.mask->vlan_id);
387 		if (err)
388 			goto out;
389 	}
390 
391 	if (mt.mask->vlan_priority) {
392 		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
393 					    mt.key->vlan_priority,
394 					    mt.mask->vlan_priority);
395 		if (err)
396 			goto out;
397 	}
398 
399 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
400 
401 	return 0;
402 out:
403 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
404 	return err;
405 }
406 
407 static int
408 sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
409 {
410 	struct flow_match_tcp mt;
411 	u16 tcp_flags_mask;
412 	u16 tcp_flags_key;
413 	enum vcap_bit val;
414 	int err = 0;
415 
416 	flow_rule_match_tcp(st->frule, &mt);
417 	tcp_flags_key = be16_to_cpu(mt.key->flags);
418 	tcp_flags_mask = be16_to_cpu(mt.mask->flags);
419 
420 	if (tcp_flags_mask & TCPHDR_FIN) {
421 		val = VCAP_BIT_0;
422 		if (tcp_flags_key & TCPHDR_FIN)
423 			val = VCAP_BIT_1;
424 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
425 		if (err)
426 			goto out;
427 	}
428 
429 	if (tcp_flags_mask & TCPHDR_SYN) {
430 		val = VCAP_BIT_0;
431 		if (tcp_flags_key & TCPHDR_SYN)
432 			val = VCAP_BIT_1;
433 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
434 		if (err)
435 			goto out;
436 	}
437 
438 	if (tcp_flags_mask & TCPHDR_RST) {
439 		val = VCAP_BIT_0;
440 		if (tcp_flags_key & TCPHDR_RST)
441 			val = VCAP_BIT_1;
442 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
443 		if (err)
444 			goto out;
445 	}
446 
447 	if (tcp_flags_mask & TCPHDR_PSH) {
448 		val = VCAP_BIT_0;
449 		if (tcp_flags_key & TCPHDR_PSH)
450 			val = VCAP_BIT_1;
451 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
452 		if (err)
453 			goto out;
454 	}
455 
456 	if (tcp_flags_mask & TCPHDR_ACK) {
457 		val = VCAP_BIT_0;
458 		if (tcp_flags_key & TCPHDR_ACK)
459 			val = VCAP_BIT_1;
460 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
461 		if (err)
462 			goto out;
463 	}
464 
465 	if (tcp_flags_mask & TCPHDR_URG) {
466 		val = VCAP_BIT_0;
467 		if (tcp_flags_key & TCPHDR_URG)
468 			val = VCAP_BIT_1;
469 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
470 		if (err)
471 			goto out;
472 	}
473 
474 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
475 
476 	return err;
477 
478 out:
479 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
480 	return err;
481 }
482 
/* Translate the ARP key into IS2 ARP opcode and IPv4 address keys */
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		/* The hardware opcode field is 2 bits wide */
		mask = 0x3;
		/* Map the tc request/reply opcode onto the ARP or RARP
		 * hardware encoding depending on the matched ethertype
		 */
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}
545 
546 static int
547 sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
548 {
549 	struct flow_match_ip mt;
550 	int err = 0;
551 
552 	flow_rule_match_ip(st->frule, &mt);
553 
554 	if (mt.mask->tos) {
555 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
556 					    mt.key->tos,
557 					    mt.mask->tos);
558 		if (err)
559 			goto out;
560 	}
561 
562 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
563 
564 	return err;
565 
566 out:
567 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
568 	return err;
569 }
570 
/* Dispatch table of key handlers, indexed by FLOW_DISSECTOR_KEY_* id.
 * Keys without an entry (NULL) are not supported by this driver.
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
584 
/* Run every supported key handler on the flower rule, building up the
 * VCAP keys in @vrule. Fails if the rule uses a dissector key with no
 * handler. On success the ethertype seen by the parser is returned via
 * @l3_proto (ETH_P_ALL if the rule did not match on ethertype).
 */
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct sparx5_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	/* Any used key not handled above is unsupported */
	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}
619 
/* Validate the action list of a flower rule before offloading:
 * - at least one action, basic hw stats only
 * - no duplicated action types
 * - a final goto action into a VCAP lookup, unless this is the last chain
 * - no unsupported action combinations
 */
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco,
					 bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 * (last_actent is non-NULL here: the list has at least one entry)
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
				       ingress)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}
677 
678 /* Add a rule counter action */
679 static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
680 				      struct vcap_rule *vrule)
681 {
682 	int err;
683 
684 	if (admin->vtype == VCAP_TYPE_IS2 || admin->vtype == VCAP_TYPE_ES2) {
685 		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
686 					       vrule->id);
687 		if (err)
688 			return err;
689 		vcap_rule_set_counter_id(vrule, vrule->id);
690 	}
691 
692 	return 0;
693 }
694 
/* Collect all port keysets and apply the first of them, possibly wildcarded
 *
 * Intersects the keysets usable by the rule with the keysets configured on
 * the port. For each rule size, the type ids of the matching keysets are
 * combined into a value/mask pair in @multi; the first selected entry is
 * applied to @vrule here, the rest are added later as extra rules.
 */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			/* Group by rule size (subwords per entry) */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	/* A wildcard rule must be usable with all port keysets */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}
789 
/* Add a copy of an existing rule with a different keyset.
 *
 * The copy keeps only the port/lookup selection keys, gets the wildcard
 * keyset and type id from @rule, and shares the original rule's cookie so
 * both are handled together by later tc operations.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	/* Keys to retain from the original rule in the copy */
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	/* The working copy is freed on success too; presumably
	 * vcap_add_rule keeps its own copy - confirm in vcap_api
	 */
	vcap_free_rule(vrule);
	return err;
}
852 
853 static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
854 					 struct flow_cls_offload *fco,
855 					 struct vcap_rule *erule,
856 					 struct vcap_admin *admin,
857 					 struct sparx5_multiple_rules *multi)
858 {
859 	int idx, err = 0;
860 
861 	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
862 		if (!multi->rule[idx].selected)
863 			continue;
864 
865 		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
866 					      &multi->rule[idx]);
867 		if (err)
868 			break;
869 	}
870 	return err;
871 }
872 
873 /* Add the actionset that is the default for the VCAP type */
874 static int sparx5_tc_set_actionset(struct vcap_admin *admin,
875 				   struct vcap_rule *vrule)
876 {
877 	enum vcap_actionfield_set aset;
878 	int err = 0;
879 
880 	switch (admin->vtype) {
881 	case VCAP_TYPE_IS0:
882 		aset = VCAP_AFS_CLASSIFICATION;
883 		break;
884 	case VCAP_TYPE_IS2:
885 		aset = VCAP_AFS_BASE_TYPE;
886 		break;
887 	case VCAP_TYPE_ES2:
888 		aset = VCAP_AFS_BASE_TYPE;
889 		break;
890 	default:
891 		return -EINVAL;
892 	}
893 	/* Do not overwrite any current actionset */
894 	if (vrule->actionset == VCAP_AFS_NO_VALUE)
895 		err = vcap_set_rule_set_actionset(vrule, aset);
896 	return err;
897 }
898 
/* Add the VCAP key to match on for a rule target value
 *
 * When a rule lives in a chained lookup, add the key that matches the
 * link value set by the previous lookup's goto action. The key used
 * depends on the VCAP type. A zero offset within the lookup means the
 * rule is directly attached and no link key is needed.
 */
static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
					  struct vcap_rule *vrule,
					  int target_cid)
{
	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
	int err;

	if (!link_val)
		return 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		/* Add NXT_IDX key for chaining rules between IS0 instances */
		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
					    1, /* enable */
					    ~0);
		if (err)
			return err;
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_IS2:
		/* Add PAG key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_ES2:
		/* Add ISDX key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
					     ~0);
	default:
		break;
	}
	return 0;
}
935 
/* Add the VCAP action that adds a target value to a rule
 *
 * Implements the goto action: the link value (chain offset) is passed to
 * the destination lookup via a mechanism that depends on the source and
 * destination VCAP types (NXT_IDX, PAG or ISDX). Only forward chaining
 * (positive offset) out of IS0 is supported.
 */
static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
				   struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   int from_cid, int to_cid)
{
	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
	int diff, err = 0;

	diff = vcap_chain_offset(vctrl, from_cid, to_cid);
	if (!(to_admin && diff > 0)) {
		pr_err("%s:%d: unsupported chain direction: %d\n",
		       __func__, __LINE__, to_cid);
		return -EINVAL;
	}
	if (admin->vtype == VCAP_TYPE_IS0 &&
	    to_admin->vtype == VCAP_TYPE_IS0) {
		/* Between IS0 instances the G_IDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
					       1); /* Replace */
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_IS2) {
		/* Between IS0 and IS2 the PAG value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PAG_OVERRIDE_MASK,
					       0xff);
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_ES2) {
		/* Between IS0 and ES2 the ISDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
					       diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_ISDX_ADD_REPLACE_SEL,
					       VCAP_BIT_1);
		if (err)
			goto out;
	} else {
		pr_err("%s:%d: unsupported chain destination: %d\n",
		       __func__, __LINE__, to_cid);
		err = -EOPNOTSUPP;
	}
out:
	return err;
}
992 
993 static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
994 					   struct flow_action_entry *act,
995 					   struct netlink_ext_ack *extack)
996 {
997 	int i;
998 
999 	if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
1000 		NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
1001 		return -EINVAL;
1002 	}
1003 
1004 	if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
1005 	    act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
1006 		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
1007 		return -EINVAL;
1008 	}
1009 
1010 	if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
1011 		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
1012 		return -EINVAL;
1013 	}
1014 
1015 	if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
1016 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
1017 		return -EINVAL;
1018 	}
1019 
1020 	sg->gate_state = true;
1021 	sg->ipv = act->gate.prio;
1022 	sg->num_entries = act->gate.num_entries;
1023 	sg->cycletime = act->gate.cycletime;
1024 	sg->cycletimeext = act->gate.cycletimeext;
1025 
1026 	for (i = 0; i < sg->num_entries; i++) {
1027 		sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
1028 		sg->gce[i].interval = act->gate.entries[i].interval;
1029 		sg->gce[i].ipv = act->gate.entries[i].ipv;
1030 		sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
1031 	}
1032 
1033 	return 0;
1034 }
1035 
/* Validate a tc police action and translate it into a sparx5 service
 * policer configuration.
 */
static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
					     struct flow_action_entry *act,
					     struct netlink_ext_ack *extack)
{
	pol->type = SPX5_POL_SERVICE;
	/* Convert bytes/sec to kbit/sec */
	pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
	pol->burst = act->police.burst;
	pol->idx = act->hw_index;

	/* rate is now in kbit */
	if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
		return -EINVAL;
	}

	/* Only drop-on-exceed is offloadable */
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	/* Only pipe/ok conform actions are offloadable */
	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	return 0;
}
1064 
/* Set up the PSFP chain for a rule: stream gate, optional flow meter and
 * stream filter, then point the rule's ISDX at the new stream filter.
 * @sg_idx/@pol_idx are negative when the rule carried no gate/police
 * action.
 */
static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
				       struct vcap_rule *vrule, int sg_idx,
				       int pol_idx, struct sparx5_psfp_sg *sg,
				       struct sparx5_psfp_fm *fm,
				       struct sparx5_psfp_sf *sf)
{
	u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
	int ret;

	/* Must always have a stream gate - max sdu (filter option) is evaluated
	 * after frames have passed the gate, so in case of only a policer, we
	 * allocate a stream gate that is always open.
	 */
	if (sg_idx < 0) {
		sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
		sg->ipv = 0; /* Disabled */
		sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->num_entries = 1;
		sg->gate_state = 1; /* Open */
		sg->gate_enabled = 1;
		sg->gce[0].gate_state = 1;
		sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gce[0].ipv = 0;
		sg->gce[0].maxoctets = 0; /* Disabled */
	}

	ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
	if (ret < 0)
		return ret;

	if (pol_idx >= 0) {
		/* Add new flow-meter */
		ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
		if (ret < 0)
			return ret;
	}

	/* Map stream filter to stream gate */
	sf->sgid = psfp_sgid;

	/* Add new stream-filter and map it to a steam gate */
	ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
	if (ret < 0)
		return ret;

	/* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
	sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);

	/* Make the rule replace the frame's ISDX with the stream filter id */
	ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
				       VCAP_BIT_1);
	if (ret)
		return ret;

	ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
	if (ret)
		return ret;

	return 0;
}
1124 
1125 static int sparx5_tc_flower_replace(struct net_device *ndev,
1126 				    struct flow_cls_offload *fco,
1127 				    struct vcap_admin *admin,
1128 				    bool ingress)
1129 {
1130 	struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
1131 	struct netlink_ext_ack *extack = fco->common.extack;
1132 	int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
1133 	struct sparx5_port *port = netdev_priv(ndev);
1134 	struct sparx5_multiple_rules multi = {};
1135 	struct sparx5 *sparx5 = port->sparx5;
1136 	struct sparx5_psfp_sg sg = { 0 };
1137 	struct sparx5_psfp_fm fm = { 0 };
1138 	struct flow_action_entry *act;
1139 	struct vcap_control *vctrl;
1140 	struct flow_rule *frule;
1141 	struct vcap_rule *vrule;
1142 	u16 l3_proto;
1143 
1144 	vctrl = port->sparx5->vcap_ctrl;
1145 
1146 	err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
1147 	if (err)
1148 		return err;
1149 
1150 	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
1151 				fco->common.prio, 0);
1152 	if (IS_ERR(vrule))
1153 		return PTR_ERR(vrule);
1154 
1155 	vrule->cookie = fco->cookie;
1156 
1157 	l3_proto = ETH_P_ALL;
1158 	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
1159 	if (err)
1160 		goto out;
1161 
1162 	err = sparx5_tc_add_rule_counter(admin, vrule);
1163 	if (err)
1164 		goto out;
1165 
1166 	err = sparx5_tc_add_rule_link_target(admin, vrule,
1167 					     fco->common.chain_index);
1168 	if (err)
1169 		goto out;
1170 
1171 	frule = flow_cls_offload_flow_rule(fco);
1172 	flow_action_for_each(idx, act, &frule->action) {
1173 		switch (act->id) {
1174 		case FLOW_ACTION_GATE: {
1175 			err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
1176 			if (err < 0)
1177 				goto out;
1178 
1179 			tc_sg_idx = act->hw_index;
1180 
1181 			break;
1182 		}
1183 		case FLOW_ACTION_POLICE: {
1184 			err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
1185 								extack);
1186 			if (err < 0)
1187 				goto out;
1188 
1189 			tc_pol_idx = fm.pol.idx;
1190 			sf.max_sdu = act->police.mtu;
1191 
1192 			break;
1193 		}
1194 		case FLOW_ACTION_TRAP:
1195 			if (admin->vtype != VCAP_TYPE_IS2 &&
1196 			    admin->vtype != VCAP_TYPE_ES2) {
1197 				NL_SET_ERR_MSG_MOD(fco->common.extack,
1198 						   "Trap action not supported in this VCAP");
1199 				err = -EOPNOTSUPP;
1200 				goto out;
1201 			}
1202 			err = vcap_rule_add_action_bit(vrule,
1203 						       VCAP_AF_CPU_COPY_ENA,
1204 						       VCAP_BIT_1);
1205 			if (err)
1206 				goto out;
1207 			err = vcap_rule_add_action_u32(vrule,
1208 						       VCAP_AF_CPU_QUEUE_NUM, 0);
1209 			if (err)
1210 				goto out;
1211 			if (admin->vtype != VCAP_TYPE_IS2)
1212 				break;
1213 			err = vcap_rule_add_action_u32(vrule,
1214 						       VCAP_AF_MASK_MODE,
1215 				SPX5_PMM_REPLACE_ALL);
1216 			if (err)
1217 				goto out;
1218 			break;
1219 		case FLOW_ACTION_ACCEPT:
1220 			err = sparx5_tc_set_actionset(admin, vrule);
1221 			if (err)
1222 				goto out;
1223 			break;
1224 		case FLOW_ACTION_GOTO:
1225 			err = sparx5_tc_set_actionset(admin, vrule);
1226 			if (err)
1227 				goto out;
1228 			sparx5_tc_add_rule_link(vctrl, admin, vrule,
1229 						fco->common.chain_index,
1230 						act->chain_index);
1231 			break;
1232 		default:
1233 			NL_SET_ERR_MSG_MOD(fco->common.extack,
1234 					   "Unsupported TC action");
1235 			err = -EOPNOTSUPP;
1236 			goto out;
1237 		}
1238 	}
1239 
1240 	/* Setup PSFP */
1241 	if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
1242 		err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
1243 						  tc_pol_idx, &sg, &fm, &sf);
1244 		if (err)
1245 			goto out;
1246 	}
1247 
1248 	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
1249 					       &multi);
1250 	if (err) {
1251 		NL_SET_ERR_MSG_MOD(fco->common.extack,
1252 				   "No matching port keyset for filter protocol and keys");
1253 		goto out;
1254 	}
1255 
1256 	/* provide the l3 protocol to guide the keyset selection */
1257 	err = vcap_val_rule(vrule, l3_proto);
1258 	if (err) {
1259 		vcap_set_tc_exterr(fco, vrule);
1260 		goto out;
1261 	}
1262 	err = vcap_add_rule(vrule);
1263 	if (err)
1264 		NL_SET_ERR_MSG_MOD(fco->common.extack,
1265 				   "Could not add the filter");
1266 
1267 	if (l3_proto == ETH_P_ALL)
1268 		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
1269 						    &multi);
1270 
1271 out:
1272 	vcap_free_rule(vrule);
1273 	return err;
1274 }
1275 
1276 static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
1277 					  struct vcap_rule *vrule)
1278 {
1279 	struct vcap_client_actionfield *afield;
1280 	u32 isdx, sfid, sgid, fmid;
1281 
1282 	/* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
1283 	 * it is used for stream and/or flow-meter classification.
1284 	 */
1285 	afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
1286 	if (!afield)
1287 		return;
1288 
1289 	isdx = afield->data.u32.value;
1290 	sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);
1291 
1292 	if (!sfid)
1293 		return;
1294 
1295 	fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
1296 	sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);
1297 
1298 	if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
1299 		pr_err("%s:%d Could not delete invalid fmid: %d", __func__,
1300 		       __LINE__, fmid);
1301 
1302 	if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
1303 		pr_err("%s:%d Could not delete invalid sgid: %d", __func__,
1304 		       __LINE__, sgid);
1305 
1306 	if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
1307 		pr_err("%s:%d Could not delete invalid sfid: %d", __func__,
1308 		       __LINE__, sfid);
1309 
1310 	sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
1311 }
1312 
1313 static int sparx5_tc_free_rule_resources(struct net_device *ndev,
1314 					 struct vcap_control *vctrl,
1315 					 int rule_id)
1316 {
1317 	struct sparx5_port *port = netdev_priv(ndev);
1318 	struct sparx5 *sparx5 = port->sparx5;
1319 	struct vcap_rule *vrule;
1320 	int ret = 0;
1321 
1322 	vrule = vcap_get_rule(vctrl, rule_id);
1323 	if (!vrule || IS_ERR(vrule))
1324 		return -EINVAL;
1325 
1326 	sparx5_tc_free_psfp_resources(sparx5, vrule);
1327 
1328 	vcap_free_rule(vrule);
1329 	return ret;
1330 }
1331 
1332 static int sparx5_tc_flower_destroy(struct net_device *ndev,
1333 				    struct flow_cls_offload *fco,
1334 				    struct vcap_admin *admin)
1335 {
1336 	struct sparx5_port *port = netdev_priv(ndev);
1337 	int err = -ENOENT, count = 0, rule_id;
1338 	struct vcap_control *vctrl;
1339 
1340 	vctrl = port->sparx5->vcap_ctrl;
1341 	while (true) {
1342 		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
1343 		if (rule_id <= 0)
1344 			break;
1345 		if (count == 0) {
1346 			/* Resources are attached to the first rule of
1347 			 * a set of rules. Only works if the rules are
1348 			 * in the correct order.
1349 			 */
1350 			err = sparx5_tc_free_rule_resources(ndev, vctrl,
1351 							    rule_id);
1352 			if (err)
1353 				pr_err("%s:%d: could not free resources %d\n",
1354 				       __func__, __LINE__, rule_id);
1355 		}
1356 		err = vcap_del_rule(vctrl, ndev, rule_id);
1357 		if (err) {
1358 			pr_err("%s:%d: could not delete rule %d\n",
1359 			       __func__, __LINE__, rule_id);
1360 			break;
1361 		}
1362 	}
1363 	return err;
1364 }
1365 
1366 static int sparx5_tc_flower_stats(struct net_device *ndev,
1367 				  struct flow_cls_offload *fco,
1368 				  struct vcap_admin *admin)
1369 {
1370 	struct sparx5_port *port = netdev_priv(ndev);
1371 	struct vcap_counter ctr = {};
1372 	struct vcap_control *vctrl;
1373 	ulong lastused = 0;
1374 	int err;
1375 
1376 	vctrl = port->sparx5->vcap_ctrl;
1377 	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
1378 	if (err)
1379 		return err;
1380 	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
1381 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
1382 	return err;
1383 }
1384 
1385 int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
1386 		     bool ingress)
1387 {
1388 	struct sparx5_port *port = netdev_priv(ndev);
1389 	struct vcap_control *vctrl;
1390 	struct vcap_admin *admin;
1391 	int err = -EINVAL;
1392 
1393 	/* Get vcap instance from the chain id */
1394 	vctrl = port->sparx5->vcap_ctrl;
1395 	admin = vcap_find_admin(vctrl, fco->common.chain_index);
1396 	if (!admin) {
1397 		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
1398 		return err;
1399 	}
1400 
1401 	switch (fco->command) {
1402 	case FLOW_CLS_REPLACE:
1403 		return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
1404 	case FLOW_CLS_DESTROY:
1405 		return sparx5_tc_flower_destroy(ndev, fco, admin);
1406 	case FLOW_CLS_STATS:
1407 		return sparx5_tc_flower_stats(ndev, fco, admin);
1408 	default:
1409 		return -EOPNOTSUPP;
1410 	}
1411 }
1412