1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip VCAP API
3  *
4  * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <net/tcp.h>
8 
9 #include "sparx5_tc.h"
10 #include "vcap_api.h"
11 #include "vcap_api_client.h"
12 #include "sparx5_main.h"
13 #include "sparx5_vcap_impl.h"
14 
#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;			/* entry is in use for this rule size */
	u8 value;			/* combined keyset type id value */
	u8 mask;			/* combined keyset type id mask */
	enum vcap_keyfield_set keyset;	/* keyset chosen for this rule size */
};

/* One wildcard rule candidate per rule size (X1..X12), indexed by the
 * keyset's subwords-per-item count
 */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
28 
/* State shared by the dissector usage handlers while one TC flower
 * filter is translated into a VCAP rule
 */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;	/* offload request (cookie, extack) */
	struct flow_rule *frule;	/* flow rule being parsed */
	struct vcap_rule *vrule;	/* VCAP rule being built */
	u16 l3_proto;			/* matched ethertype (host order) */
	u8 l4_proto;			/* matched IP protocol number */
	unsigned int used_keys;		/* dissector keys consumed so far */
};

/* Accumulator for the packet counters of all rules sharing one cookie */
struct sparx5_tc_rule_pkt_cnt {
	u64 cookie;	/* TC filter cookie to match on */
	u32 pkts;	/* accumulated packet count */
};
42 
/* These protocols have dedicated keysets in IS2 and a TC dissector
 * ETH_P_ARP does not have a TC dissector
 */
static u16 sparx5_tc_known_etypes[] = {
	ETH_P_ALL,
	ETH_P_ARP,
	ETH_P_IP,
	ETH_P_IPV6,
};

/* ARP opcode encoding used by the IS2 ARP keyset */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

/* ARP opcode encoding used by the TC flower arp_op match */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};
65 
66 static bool sparx5_tc_is_known_etype(u16 etype)
67 {
68 	int idx;
69 
70 	/* For now this only knows about IS2 traffic classification */
71 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
72 		if (sparx5_tc_known_etypes[idx] == etype)
73 			return true;
74 
75 	return false;
76 }
77 
78 static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
79 {
80 	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
81 	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
82 	struct flow_match_eth_addrs match;
83 	struct vcap_u48_key smac, dmac;
84 	int err = 0;
85 
86 	flow_rule_match_eth_addrs(st->frule, &match);
87 
88 	if (!is_zero_ether_addr(match.mask->src)) {
89 		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
90 		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
91 		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
92 		if (err)
93 			goto out;
94 	}
95 
96 	if (!is_zero_ether_addr(match.mask->dst)) {
97 		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
98 		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
99 		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
100 		if (err)
101 			goto out;
102 	}
103 
104 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
105 
106 	return err;
107 
108 out:
109 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
110 	return err;
111 }
112 
113 static int
114 sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
115 {
116 	int err = 0;
117 
118 	if (st->l3_proto == ETH_P_IP) {
119 		struct flow_match_ipv4_addrs mt;
120 
121 		flow_rule_match_ipv4_addrs(st->frule, &mt);
122 		if (mt.mask->src) {
123 			err = vcap_rule_add_key_u32(st->vrule,
124 						    VCAP_KF_L3_IP4_SIP,
125 						    be32_to_cpu(mt.key->src),
126 						    be32_to_cpu(mt.mask->src));
127 			if (err)
128 				goto out;
129 		}
130 		if (mt.mask->dst) {
131 			err = vcap_rule_add_key_u32(st->vrule,
132 						    VCAP_KF_L3_IP4_DIP,
133 						    be32_to_cpu(mt.key->dst),
134 						    be32_to_cpu(mt.mask->dst));
135 			if (err)
136 				goto out;
137 		}
138 	}
139 
140 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
141 
142 	return err;
143 
144 out:
145 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
146 	return err;
147 }
148 
/* Translate a TC IPv6 address match into L3 SIP/DIP VCAP keys.
 * Only applied when the filter matches on the IPv6 ethertype; the
 * dissector key is marked as consumed in either case.
 */
static int
sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}
	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}
184 
185 static int
186 sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
187 {
188 	struct flow_match_control mt;
189 	u32 value, mask;
190 	int err = 0;
191 
192 	flow_rule_match_control(st->frule, &mt);
193 
194 	if (mt.mask->flags) {
195 		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
196 			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
197 				value = 1; /* initial fragment */
198 				mask = 0x3;
199 			} else {
200 				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
201 					value = 3; /* follow up fragment */
202 					mask = 0x3;
203 				} else {
204 					value = 0; /* no fragment */
205 					mask = 0x3;
206 				}
207 			}
208 		} else {
209 			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
210 				value = 3; /* follow up fragment */
211 				mask = 0x3;
212 			} else {
213 				value = 0; /* no fragment */
214 				mask = 0x3;
215 			}
216 		}
217 
218 		err = vcap_rule_add_key_u32(st->vrule,
219 					    VCAP_KF_L3_FRAGMENT_TYPE,
220 					    value, mask);
221 		if (err)
222 			goto out;
223 	}
224 
225 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
226 
227 	return err;
228 
229 out:
230 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
231 	return err;
232 }
233 
234 static int
235 sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
236 {
237 	struct flow_match_ports mt;
238 	u16 value, mask;
239 	int err = 0;
240 
241 	flow_rule_match_ports(st->frule, &mt);
242 
243 	if (mt.mask->src) {
244 		value = be16_to_cpu(mt.key->src);
245 		mask = be16_to_cpu(mt.mask->src);
246 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
247 					    mask);
248 		if (err)
249 			goto out;
250 	}
251 
252 	if (mt.mask->dst) {
253 		value = be16_to_cpu(mt.key->dst);
254 		mask = be16_to_cpu(mt.mask->dst);
255 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
256 					    mask);
257 		if (err)
258 			goto out;
259 	}
260 
261 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
262 
263 	return err;
264 
265 out:
266 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
267 	return err;
268 }
269 
270 static int
271 sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
272 {
273 	struct flow_match_basic mt;
274 	int err = 0;
275 
276 	flow_rule_match_basic(st->frule, &mt);
277 
278 	if (mt.mask->n_proto) {
279 		st->l3_proto = be16_to_cpu(mt.key->n_proto);
280 		if (!sparx5_tc_is_known_etype(st->l3_proto)) {
281 			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
282 						    st->l3_proto, ~0);
283 			if (err)
284 				goto out;
285 		} else if (st->l3_proto == ETH_P_IP) {
286 			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
287 						    VCAP_BIT_1);
288 			if (err)
289 				goto out;
290 		} else if (st->l3_proto == ETH_P_IPV6) {
291 			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
292 						    VCAP_BIT_0);
293 			if (err)
294 				goto out;
295 		}
296 	}
297 
298 	if (mt.mask->ip_proto) {
299 		st->l4_proto = mt.key->ip_proto;
300 		if (st->l4_proto == IPPROTO_TCP) {
301 			err = vcap_rule_add_key_bit(st->vrule,
302 						    VCAP_KF_TCP_IS,
303 						    VCAP_BIT_1);
304 			if (err)
305 				goto out;
306 		} else if (st->l4_proto == IPPROTO_UDP) {
307 			err = vcap_rule_add_key_bit(st->vrule,
308 						    VCAP_KF_TCP_IS,
309 						    VCAP_BIT_0);
310 			if (err)
311 				goto out;
312 		} else {
313 			err = vcap_rule_add_key_u32(st->vrule,
314 						    VCAP_KF_L3_IP_PROTO,
315 						    st->l4_proto, ~0);
316 			if (err)
317 				goto out;
318 		}
319 	}
320 
321 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
322 
323 	return err;
324 
325 out:
326 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
327 	return err;
328 }
329 
330 static int
331 sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
332 {
333 	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
334 	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
335 	struct flow_match_vlan mt;
336 	int err;
337 
338 	flow_rule_match_vlan(st->frule, &mt);
339 
340 	if (mt.mask->vlan_id) {
341 		err = vcap_rule_add_key_u32(st->vrule, vid_key,
342 					    mt.key->vlan_id,
343 					    mt.mask->vlan_id);
344 		if (err)
345 			goto out;
346 	}
347 
348 	if (mt.mask->vlan_priority) {
349 		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
350 					    mt.key->vlan_priority,
351 					    mt.mask->vlan_priority);
352 		if (err)
353 			goto out;
354 	}
355 
356 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
357 
358 	return 0;
359 out:
360 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
361 	return err;
362 }
363 
364 static int
365 sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
366 {
367 	struct flow_match_tcp mt;
368 	u16 tcp_flags_mask;
369 	u16 tcp_flags_key;
370 	enum vcap_bit val;
371 	int err = 0;
372 
373 	flow_rule_match_tcp(st->frule, &mt);
374 	tcp_flags_key = be16_to_cpu(mt.key->flags);
375 	tcp_flags_mask = be16_to_cpu(mt.mask->flags);
376 
377 	if (tcp_flags_mask & TCPHDR_FIN) {
378 		val = VCAP_BIT_0;
379 		if (tcp_flags_key & TCPHDR_FIN)
380 			val = VCAP_BIT_1;
381 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
382 		if (err)
383 			goto out;
384 	}
385 
386 	if (tcp_flags_mask & TCPHDR_SYN) {
387 		val = VCAP_BIT_0;
388 		if (tcp_flags_key & TCPHDR_SYN)
389 			val = VCAP_BIT_1;
390 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
391 		if (err)
392 			goto out;
393 	}
394 
395 	if (tcp_flags_mask & TCPHDR_RST) {
396 		val = VCAP_BIT_0;
397 		if (tcp_flags_key & TCPHDR_RST)
398 			val = VCAP_BIT_1;
399 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
400 		if (err)
401 			goto out;
402 	}
403 
404 	if (tcp_flags_mask & TCPHDR_PSH) {
405 		val = VCAP_BIT_0;
406 		if (tcp_flags_key & TCPHDR_PSH)
407 			val = VCAP_BIT_1;
408 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
409 		if (err)
410 			goto out;
411 	}
412 
413 	if (tcp_flags_mask & TCPHDR_ACK) {
414 		val = VCAP_BIT_0;
415 		if (tcp_flags_key & TCPHDR_ACK)
416 			val = VCAP_BIT_1;
417 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
418 		if (err)
419 			goto out;
420 	}
421 
422 	if (tcp_flags_mask & TCPHDR_URG) {
423 		val = VCAP_BIT_0;
424 		if (tcp_flags_key & TCPHDR_URG)
425 			val = VCAP_BIT_1;
426 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
427 		if (err)
428 			goto out;
429 	}
430 
431 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
432 
433 	return err;
434 
435 out:
436 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
437 	return err;
438 }
439 
/* Translate a TC ARP match into IS2 ARP keys.
 *
 * The TC and IS2 opcode encodings differ, so the opcode value is
 * remapped, and the IS2 opcode also distinguishes ARP from RARP based
 * on the matched ethertype.  The IS2 ARP keyset has no fields for the
 * hardware (MAC) addresses, so such matches are rejected.
 */
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		/* 2-bit opcode field: request/reply for ARP and RARP */
		mask = 0x3;
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		/* sip/tip are stored as host-order-agnostic values; the
		 * __force casts convert them for the byte-order helper
		 */
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}
502 
503 static int
504 sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
505 {
506 	struct flow_match_ip mt;
507 	int err = 0;
508 
509 	flow_rule_match_ip(st->frule, &mt);
510 
511 	if (mt.mask->tos) {
512 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
513 					    mt.key->tos,
514 					    mt.mask->tos);
515 		if (err)
516 			goto out;
517 	}
518 
519 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
520 
521 	return err;
522 
523 out:
524 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
525 	return err;
526 }
527 
/* Dispatch table: one usage handler per supported flower dissector key.
 * Keys without an entry here stay NULL and are skipped, which leaves
 * their bit unset in used_keys and makes the filter be rejected as an
 * unsupported match item.
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
540 
/* Run every matching dissector usage handler on the filter and verify
 * that all dissector keys used by the filter were handled.
 *
 * Returns 0 on success, the first handler error, or -ENOENT when the
 * filter contains a match item with no handler.  On success *l3_proto
 * (if given) receives the matched ethertype (ETH_P_ALL when the filter
 * did not match on one).
 */
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct sparx5_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	/* Any dissector key not consumed by a handler is unsupported */
	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}
574 
/* Validate the TC action list of a flower filter.
 *
 * Requires at least one action, basic hardware stats, no duplicated
 * action types, and a trailing 'goto' action unless the filter sits in
 * the last chain/lookup.  Also rejects action combinations that cannot
 * be expressed together (pass + trap).
 */
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	/* Each action id may appear at most once in a filter */
	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}
630 
631 /* Add a rule counter action - only IS2 is considered for now */
632 static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
633 				      struct vcap_rule *vrule)
634 {
635 	int err;
636 
637 	err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
638 	if (err)
639 		return err;
640 
641 	vcap_rule_set_counter_id(vrule, vrule->id);
642 	return err;
643 }
644 
/* Collect all port keysets and apply the first of them, possibly wildcarded.
 *
 * The intersection of the keysets usable by the rule and the keysets
 * configured on the port is grouped by rule size; the first group is
 * applied to the rule here and the remaining groups are left in 'multi'
 * for sparx5_tc_add_remaining_rules() to install as extra rules.
 */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			/* Group by rule size (sw_per_item) and fold the
			 * keyset type ids into a common value/mask pair
			 */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	/* No usable keyset shared by the rule and the port */
	if (count == 0)
		return -EPROTO;

	/* A wildcard (all protocols) rule must cover every port keyset */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}
739 
/* Install a copy of an existing rule using a different keyset; used so
 * a wildcard (ETH_P_ALL) filter can cover every port keyset.  The copy
 * shares the original's cookie so stats and deletion treat them as one
 * filter.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	/* Keys passed to vcap_filter_rule_keys() below; presumably the
	 * ones retained in the copy (other keys may not exist in the new
	 * keyset) - TODO confirm against the vcap_filter_rule_keys() API
	 */
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	/* The local copy is freed on success too; vcap_add_rule()
	 * presumably keeps its own copy of the rule - verify
	 */
	vcap_free_rule(vrule);
	return err;
}
802 
803 static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
804 					 struct flow_cls_offload *fco,
805 					 struct vcap_rule *erule,
806 					 struct vcap_admin *admin,
807 					 struct sparx5_multiple_rules *multi)
808 {
809 	int idx, err = 0;
810 
811 	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
812 		if (!multi->rule[idx].selected)
813 			continue;
814 
815 		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
816 					      &multi->rule[idx]);
817 		if (err)
818 			break;
819 	}
820 	return err;
821 }
822 
823 static int sparx5_tc_flower_replace(struct net_device *ndev,
824 				    struct flow_cls_offload *fco,
825 				    struct vcap_admin *admin)
826 {
827 	struct sparx5_port *port = netdev_priv(ndev);
828 	struct sparx5_multiple_rules multi = {};
829 	struct flow_action_entry *act;
830 	struct vcap_control *vctrl;
831 	struct flow_rule *frule;
832 	struct vcap_rule *vrule;
833 	u16 l3_proto;
834 	int err, idx;
835 
836 	vctrl = port->sparx5->vcap_ctrl;
837 
838 	err = sparx5_tc_flower_action_check(vctrl, ndev, fco);
839 	if (err)
840 		return err;
841 
842 	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
843 				fco->common.prio, 0);
844 	if (IS_ERR(vrule))
845 		return PTR_ERR(vrule);
846 
847 	vrule->cookie = fco->cookie;
848 
849 	l3_proto = ETH_P_ALL;
850 	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
851 	if (err)
852 		goto out;
853 
854 	err = sparx5_tc_add_rule_counter(admin, vrule);
855 	if (err)
856 		goto out;
857 
858 	frule = flow_cls_offload_flow_rule(fco);
859 	flow_action_for_each(idx, act, &frule->action) {
860 		switch (act->id) {
861 		case FLOW_ACTION_TRAP:
862 			err = vcap_rule_add_action_bit(vrule,
863 						       VCAP_AF_CPU_COPY_ENA,
864 						       VCAP_BIT_1);
865 			if (err)
866 				goto out;
867 			err = vcap_rule_add_action_u32(vrule,
868 						       VCAP_AF_CPU_QUEUE_NUM, 0);
869 			if (err)
870 				goto out;
871 			err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
872 						       SPX5_PMM_REPLACE_ALL);
873 			if (err)
874 				goto out;
875 			/* For now the actionset is hardcoded */
876 			err = vcap_set_rule_set_actionset(vrule,
877 							  VCAP_AFS_BASE_TYPE);
878 			if (err)
879 				goto out;
880 			break;
881 		case FLOW_ACTION_ACCEPT:
882 			/* For now the actionset is hardcoded */
883 			err = vcap_set_rule_set_actionset(vrule,
884 							  VCAP_AFS_BASE_TYPE);
885 			if (err)
886 				goto out;
887 			break;
888 		case FLOW_ACTION_GOTO:
889 			/* Links between VCAPs will be added later */
890 			break;
891 		default:
892 			NL_SET_ERR_MSG_MOD(fco->common.extack,
893 					   "Unsupported TC action");
894 			err = -EOPNOTSUPP;
895 			goto out;
896 		}
897 	}
898 
899 	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
900 					       &multi);
901 	if (err) {
902 		NL_SET_ERR_MSG_MOD(fco->common.extack,
903 				   "No matching port keyset for filter protocol and keys");
904 		goto out;
905 	}
906 
907 	/* provide the l3 protocol to guide the keyset selection */
908 	err = vcap_val_rule(vrule, l3_proto);
909 	if (err) {
910 		vcap_set_tc_exterr(fco, vrule);
911 		goto out;
912 	}
913 	err = vcap_add_rule(vrule);
914 	if (err)
915 		NL_SET_ERR_MSG_MOD(fco->common.extack,
916 				   "Could not add the filter");
917 
918 	if (l3_proto == ETH_P_ALL)
919 		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
920 						    &multi);
921 
922 out:
923 	vcap_free_rule(vrule);
924 	return err;
925 }
926 
927 static int sparx5_tc_flower_destroy(struct net_device *ndev,
928 				    struct flow_cls_offload *fco,
929 				    struct vcap_admin *admin)
930 {
931 	struct sparx5_port *port = netdev_priv(ndev);
932 	struct vcap_control *vctrl;
933 	int err = -ENOENT, rule_id;
934 
935 	vctrl = port->sparx5->vcap_ctrl;
936 	while (true) {
937 		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
938 		if (rule_id <= 0)
939 			break;
940 		err = vcap_del_rule(vctrl, ndev, rule_id);
941 		if (err) {
942 			pr_err("%s:%d: could not delete rule %d\n",
943 			       __func__, __LINE__, rule_id);
944 			break;
945 		}
946 	}
947 	return err;
948 }
949 
950 /* Collect packet counts from all rules with the same cookie */
951 static int sparx5_tc_rule_counter_cb(void *arg, struct vcap_rule *rule)
952 {
953 	struct sparx5_tc_rule_pkt_cnt *rinfo = arg;
954 	struct vcap_counter counter;
955 	int err = 0;
956 
957 	if (rule->cookie == rinfo->cookie) {
958 		err = vcap_rule_get_counter(rule, &counter);
959 		if (err)
960 			return err;
961 		rinfo->pkts += counter.value;
962 		/* Reset the rule counter */
963 		counter.value = 0;
964 		vcap_rule_set_counter(rule, &counter);
965 	}
966 	return err;
967 }
968 
969 static int sparx5_tc_flower_stats(struct net_device *ndev,
970 				  struct flow_cls_offload *fco,
971 				  struct vcap_admin *admin)
972 {
973 	struct sparx5_port *port = netdev_priv(ndev);
974 	struct sparx5_tc_rule_pkt_cnt rinfo = {};
975 	struct vcap_control *vctrl;
976 	ulong lastused = 0;
977 	u64 drops = 0;
978 	u32 pkts = 0;
979 	int err;
980 
981 	rinfo.cookie = fco->cookie;
982 	vctrl = port->sparx5->vcap_ctrl;
983 	err = vcap_rule_iter(vctrl, sparx5_tc_rule_counter_cb, &rinfo);
984 	if (err)
985 		return err;
986 	pkts = rinfo.pkts;
987 	flow_stats_update(&fco->stats, 0x0, pkts, drops, lastused,
988 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
989 	return err;
990 }
991 
992 int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
993 		     bool ingress)
994 {
995 	struct sparx5_port *port = netdev_priv(ndev);
996 	struct vcap_control *vctrl;
997 	struct vcap_admin *admin;
998 	int err = -EINVAL;
999 
1000 	/* Get vcap instance from the chain id */
1001 	vctrl = port->sparx5->vcap_ctrl;
1002 	admin = vcap_find_admin(vctrl, fco->common.chain_index);
1003 	if (!admin) {
1004 		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
1005 		return err;
1006 	}
1007 
1008 	switch (fco->command) {
1009 	case FLOW_CLS_REPLACE:
1010 		return sparx5_tc_flower_replace(ndev, fco, admin);
1011 	case FLOW_CLS_DESTROY:
1012 		return sparx5_tc_flower_destroy(ndev, fco, admin);
1013 	case FLOW_CLS_STATS:
1014 		return sparx5_tc_flower_stats(ndev, fco, admin);
1015 	default:
1016 		return -EOPNOTSUPP;
1017 	}
1018 }
1019