// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tcp.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;
	u8 value;
	u8 mask;
	enum vcap_keyfield_set keyset;
};

struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	struct vcap_admin *admin;
	u16 l3_proto;
	u8 l4_proto;
	unsigned int used_keys;
};

enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};

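/* Parse the flower ethernet address match and add the L2 SMAC/DMAC keys */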
static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
	struct flow_match_eth_addrs match;
	struct vcap_u48_key smac, dmac;
	int err = 0;

	flow_rule_match_eth_addrs(st->frule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
		if (err)
			goto out;
	}

	if (!is_zero_ether_addr(match.mask->dst)) {
		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
	return err;
}

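/* Parse the flower IPv4 address match and add the IP4 SIP/DIP keys
 * (only used when the filter protocol is IPv4)
 */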
static int
sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IP) {
		struct flow_match_ipv4_addrs mt;

		flow_rule_match_ipv4_addrs(st->frule, &mt);
		if (mt.mask->src) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_SIP,
						    be32_to_cpu(mt.key->src),
						    be32_to_cpu(mt.mask->src));
			if (err)
				goto out;
		}
		if (mt.mask->dst) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_DIP,
						    be32_to_cpu(mt.key->dst),
						    be32_to_cpu(mt.mask->dst));
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
	return err;
}

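/* Parse the flower IPv6 address match and add the IP6 SIP/DIP keys
 * (only used when the filter protocol is IPv6)
 */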
static int
sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}
	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}

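/* Parse the flower control match (fragment flags) and encode it in the
 * L3_FRAGMENT_TYPE key: 0 = no fragment, 1 = initial fragment,
 * 3 = follow-up fragment
 */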
static int
sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
				value = 1; /* initial fragment */
				mask = 0x3;
			} else {
				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
					value = 3; /* follow up fragment */
					mask = 0x3;
				} else {
					value = 0; /* no fragment */
					mask = 0x3;
				}
			}
		} else {
			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
				value = 3; /* follow up fragment */
				mask = 0x3;
			} else {
				value = 0; /* no fragment */
				mask = 0x3;
			}
		}

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
	return err;
}

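/* Parse the flower L4 port match and add the L4 SPORT/DPORT keys */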
static int
sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ports mt;
	u16 value, mask;
	int err = 0;

	flow_rule_match_ports(st->frule, &mt);

	if (mt.mask->src) {
		value = be16_to_cpu(mt.key->src);
		mask = be16_to_cpu(mt.mask->src);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
					    mask);
		if (err)
			goto out;
	}

	if (mt.mask->dst) {
		value = be16_to_cpu(mt.key->dst);
		mask = be16_to_cpu(mt.mask->dst);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
					    mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
	return err;
}

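/* Parse the flower basic match: the ethertype selects either the ETYPE key
 * or the IP4_IS/IP_SNAP_IS bits, and the IP protocol selects the TCP_IS,
 * TCP_UDP_IS or L3_IP_PROTO keys
 */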
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_IP_SNAP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}

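/* Parse the flower cvlan (inner VLAN tag) match; only the IS0 VCAP supports
 * this match
 */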
static int
sparx5_tc_flower_handler_cvlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
	struct flow_match_vlan mt;
	u16 tpid;
	int err;

	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");
		return -EINVAL;
	}

	flow_rule_match_cvlan(st->frule, &mt);

	tpid = be16_to_cpu(mt.key->vlan_tpid);

	if (tpid == ETH_P_8021Q) {
		vid_key = VCAP_KF_8021Q_VID1;
		pcp_key = VCAP_KF_8021Q_PCP1;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
	return err;
}

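/* Parse the flower outer VLAN match; IS0 uses the VID0/PCP0 keys, the other
 * VCAPs use the classified VID/PCP keys
 */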
static int
sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
	struct flow_match_vlan mt;
	int err;

	flow_rule_match_vlan(st->frule, &mt);

	if (st->admin->vtype == VCAP_TYPE_IS0) {
		vid_key = VCAP_KF_8021Q_VID0;
		pcp_key = VCAP_KF_8021Q_PCP0;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
	return err;
}

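/* Parse the flower TCP flags match and add the matching L4 flag bits
 * (FIN/SYN/RST/PSH/ACK/URG)
 */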
static int
sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_tcp mt;
	u16 tcp_flags_mask;
	u16 tcp_flags_key;
	enum vcap_bit val;
	int err = 0;

	flow_rule_match_tcp(st->frule, &mt);
	tcp_flags_key = be16_to_cpu(mt.key->flags);
	tcp_flags_mask = be16_to_cpu(mt.mask->flags);

	if (tcp_flags_mask & TCPHDR_FIN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_FIN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_SYN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_SYN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_RST) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_RST)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_PSH) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_PSH)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_ACK) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_ACK)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_URG) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_URG)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
	return err;
}

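/* Parse the flower ARP match: map the TC opcode to the IS2 ARP/RARP opcode
 * and add the sender/target IPv4 addresses as SIP/DIP keys
 */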
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		mask = 0x3;
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}

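/* Parse the flower IP match and add the L3 TOS key */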
static int
sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ip mt;
	int err = 0;

	flow_rule_match_ip(st->frule, &mt);

	if (mt.mask->tos) {
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
					    mt.key->tos,
					    mt.mask->tos);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
	return err;
}

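/* Dispatch table mapping each supported flower dissector key to its handler */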
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};

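/* Run the handler for each dissector key used by the filter and verify that
 * no unsupported match items remain
 */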
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct sparx5_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}

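/* Verify that the action list can be offloaded: no duplicate actions, a valid
 * goto (or last chain) at the end, and no unsupported action combinations
 */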
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco,
					 bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
				       ingress)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
{
	int err;

	if (admin->vtype == VCAP_TYPE_IS2 || admin->vtype == VCAP_TYPE_ES2) {
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
	}

	return 0;
}

/* Collect all port keysets and apply the first of them, possibly wildcarded */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keysets */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * is more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}

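/* Add an extra wildcarded copy of a rule using another of the selected port
 * keysets; the copy keeps only the port, lookup and type keys and shares the
 * cookie of the original rule
 */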
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	vcap_free_rule(vrule);
	return err;
}

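/* Add a rule copy for each port keyset still marked as selected */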
static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_rule *erule,
					 struct vcap_admin *admin,
					 struct sparx5_multiple_rules *multi)
{
	int idx, err = 0;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		if (!multi->rule[idx].selected)
			continue;

		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
					      &multi->rule[idx]);
		if (err)
			break;
	}
	return err;
}

/* Add the actionset that is the default for the VCAP type */
static int sparx5_tc_set_actionset(struct vcap_admin *admin,
				   struct vcap_rule *vrule)
{
	enum vcap_actionfield_set aset;
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		aset = VCAP_AFS_CLASSIFICATION;
		break;
	case VCAP_TYPE_IS2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	case VCAP_TYPE_ES2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	default:
		return -EINVAL;
	}
	/* Do not overwrite any current actionset */
	if (vrule->actionset == VCAP_AFS_NO_VALUE)
		err = vcap_set_rule_set_actionset(vrule, aset);
	return err;
}

/* Add the VCAP key to match on for a rule target value */
static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
					  struct vcap_rule *vrule,
					  int target_cid)
{
	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
	int err;

	if (!link_val)
		return 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		/* Add NXT_IDX key for chaining rules between IS0 instances */
		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
					    1, /* enable */
					    ~0);
		if (err)
			return err;
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_IS2:
		/* Add PAG key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_ES2:
		/* Add ISDX key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
					     ~0);
	default:
		break;
	}
	return 0;
}

/* Add the VCAP action that adds a target value to a rule */
static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
				   struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   int from_cid, int to_cid)
{
	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
	int diff, err = 0;

	diff = vcap_chain_offset(vctrl, from_cid, to_cid);
	if (!(to_admin && diff > 0)) {
		pr_err("%s:%d: unsupported chain direction: %d\n",
		       __func__, __LINE__, to_cid);
		return -EINVAL;
	}
	if (admin->vtype == VCAP_TYPE_IS0 &&
	    to_admin->vtype == VCAP_TYPE_IS0) {
		/* Between IS0 instances the G_IDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
					       1); /* Replace */
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_IS2) {
		/* Between IS0 and IS2 the PAG value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PAG_OVERRIDE_MASK,
					       0xff);
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_ES2) {
		/* Between IS0 and ES2 the ISDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
					       diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_ISDX_ADD_REPLACE_SEL,
					       VCAP_BIT_1);
		if (err)
			goto out;
	} else {
		pr_err("%s:%d: unsupported chain destination: %d\n",
		       __func__, __LINE__, to_cid);
		err = -EOPNOTSUPP;
	}
out:
	return err;
}

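/* Handle FLOW_CLS_REPLACE: parse the match keys, translate the actions, pick
 * a keyset and add the rule (plus any extra wildcard copies) to the VCAP
 */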
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	u16 l3_proto;
	int err, idx;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	vrule->cookie = fco->cookie;

	l3_proto = ETH_P_ALL;
	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_link_target(admin, vrule,
					     fco->common.chain_index);
	if (err)
		goto out;

	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_TRAP:
			if (admin->vtype != VCAP_TYPE_IS2 &&
			    admin->vtype != VCAP_TYPE_ES2) {
				NL_SET_ERR_MSG_MOD(fco->common.extack,
						   "Trap action not supported in this VCAP");
				err = -EOPNOTSUPP;
				goto out;
			}
			err = vcap_rule_add_action_bit(vrule,
						       VCAP_AF_CPU_COPY_ENA,
						       VCAP_BIT_1);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_CPU_QUEUE_NUM, 0);
			if (err)
				goto out;
			if (admin->vtype != VCAP_TYPE_IS2)
				break;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_MASK_MODE,
						       SPX5_PMM_REPLACE_ALL);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			sparx5_tc_add_rule_link(vctrl, admin, vrule,
						fco->common.chain_index,
						act->chain_index);
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
					       &multi);
	if (err) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "No matching port keyset for filter protocol and keys");
		goto out;
	}

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	if (l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
						    &multi);

out:
	vcap_free_rule(vrule);
	return err;
}

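/* Handle FLOW_CLS_DESTROY: delete all VCAP rules (including extra wildcard
 * copies) that share the cookie of the filter being destroyed
 */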
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	int err = -ENOENT, rule_id;

	vctrl = port->sparx5->vcap_ctrl;
	while (true) {
		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
		if (rule_id <= 0)
			break;
		err = vcap_del_rule(vctrl, ndev, rule_id);
		if (err) {
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
			break;
		}
	}
	return err;
}

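/* Handle FLOW_CLS_STATS: report the rule packet counter for the filter cookie */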
static int sparx5_tc_flower_stats(struct net_device *ndev,
				  struct flow_cls_offload *fco,
				  struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_counter ctr = {};
	struct vcap_control *vctrl;
	ulong lastused = 0;
	int err;

	vctrl = port->sparx5->vcap_ctrl;
	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
	if (err)
		return err;
	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	return err;
}

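/* Entry point for flower offload: find the VCAP instance from the chain id
 * and dispatch on the flower command
 */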
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
		     bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	struct vcap_admin *admin;
	int err = -EINVAL;

	/* Get vcap instance from the chain id */
	vctrl = port->sparx5->vcap_ctrl;
	admin = vcap_find_admin(vctrl, fco->common.chain_index);
	if (!admin) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
		return err;
	}

	switch (fco->command) {
	case FLOW_CLS_REPLACE:
		return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
	case FLOW_CLS_DESTROY:
		return sparx5_tc_flower_destroy(ndev, fco, admin);
	case FLOW_CLS_STATS:
		return sparx5_tc_flower_stats(ndev, fco, admin);
	default:
		return -EOPNOTSUPP;
	}
}