1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip VCAP API
3  *
4  * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <net/tcp.h>
8 
9 #include "sparx5_tc.h"
10 #include "vcap_api.h"
11 #include "vcap_api_client.h"
12 #include "sparx5_main.h"
13 #include "sparx5_vcap_impl.h"
14 
#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;                 /* entry is in use */
	u8 value;                      /* combined keyset type id value */
	u8 mask;                       /* combined keyset type id mask */
	enum vcap_keyfield_set keyset; /* keyset used for this rule copy */
};

/* One (possibly wildcarded) rule slot per supported rule size (X1..X12) */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

/* Shared state passed between the flower dissector usage handlers */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco; /* the offload request from TC */
	struct flow_rule *frule;      /* flower match/action rule */
	struct vcap_rule *vrule;      /* VCAP rule being built */
	u16 l3_proto;                 /* ethertype; ETH_P_ALL until matched */
	u8 l4_proto;                  /* IP protocol; 0 until matched */
	unsigned int used_keys;       /* dissector keys consumed so far */
};
37 
/* These protocols have dedicated keysets in IS2 and a TC dissector
 * ETH_P_ARP does not have a TC dissector
 */
static u16 sparx5_tc_known_etypes[] = {
	ETH_P_ALL,
	ETH_P_ARP,
	ETH_P_IP,
	ETH_P_IPV6,
};

/* ARP opcode encoding used by the IS2 VCAP_KF_ARP_OPCODE key */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

/* ARP opcode values as used by the TC flower arp_op match */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};
60 
61 static bool sparx5_tc_is_known_etype(u16 etype)
62 {
63 	int idx;
64 
65 	/* For now this only knows about IS2 traffic classification */
66 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
67 		if (sparx5_tc_known_etypes[idx] == etype)
68 			return true;
69 
70 	return false;
71 }
72 
73 static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
74 {
75 	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
76 	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
77 	struct flow_match_eth_addrs match;
78 	struct vcap_u48_key smac, dmac;
79 	int err = 0;
80 
81 	flow_rule_match_eth_addrs(st->frule, &match);
82 
83 	if (!is_zero_ether_addr(match.mask->src)) {
84 		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
85 		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
86 		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
87 		if (err)
88 			goto out;
89 	}
90 
91 	if (!is_zero_ether_addr(match.mask->dst)) {
92 		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
93 		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
94 		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
95 		if (err)
96 			goto out;
97 	}
98 
99 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
100 
101 	return err;
102 
103 out:
104 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
105 	return err;
106 }
107 
108 static int
109 sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
110 {
111 	int err = 0;
112 
113 	if (st->l3_proto == ETH_P_IP) {
114 		struct flow_match_ipv4_addrs mt;
115 
116 		flow_rule_match_ipv4_addrs(st->frule, &mt);
117 		if (mt.mask->src) {
118 			err = vcap_rule_add_key_u32(st->vrule,
119 						    VCAP_KF_L3_IP4_SIP,
120 						    be32_to_cpu(mt.key->src),
121 						    be32_to_cpu(mt.mask->src));
122 			if (err)
123 				goto out;
124 		}
125 		if (mt.mask->dst) {
126 			err = vcap_rule_add_key_u32(st->vrule,
127 						    VCAP_KF_L3_IP4_DIP,
128 						    be32_to_cpu(mt.key->dst),
129 						    be32_to_cpu(mt.mask->dst));
130 			if (err)
131 				goto out;
132 		}
133 	}
134 
135 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
136 
137 	return err;
138 
139 out:
140 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
141 	return err;
142 }
143 
144 static int
145 sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
146 {
147 	int err = 0;
148 
149 	if (st->l3_proto == ETH_P_IPV6) {
150 		struct flow_match_ipv6_addrs mt;
151 		struct vcap_u128_key sip;
152 		struct vcap_u128_key dip;
153 
154 		flow_rule_match_ipv6_addrs(st->frule, &mt);
155 		/* Check if address masks are non-zero */
156 		if (!ipv6_addr_any(&mt.mask->src)) {
157 			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
158 			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
159 			err = vcap_rule_add_key_u128(st->vrule,
160 						     VCAP_KF_L3_IP6_SIP, &sip);
161 			if (err)
162 				goto out;
163 		}
164 		if (!ipv6_addr_any(&mt.mask->dst)) {
165 			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
166 			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
167 			err = vcap_rule_add_key_u128(st->vrule,
168 						     VCAP_KF_L3_IP6_DIP, &dip);
169 			if (err)
170 				goto out;
171 		}
172 	}
173 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
174 	return err;
175 out:
176 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
177 	return err;
178 }
179 
180 static int
181 sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
182 {
183 	struct flow_match_control mt;
184 	u32 value, mask;
185 	int err = 0;
186 
187 	flow_rule_match_control(st->frule, &mt);
188 
189 	if (mt.mask->flags) {
190 		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
191 			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
192 				value = 1; /* initial fragment */
193 				mask = 0x3;
194 			} else {
195 				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
196 					value = 3; /* follow up fragment */
197 					mask = 0x3;
198 				} else {
199 					value = 0; /* no fragment */
200 					mask = 0x3;
201 				}
202 			}
203 		} else {
204 			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
205 				value = 3; /* follow up fragment */
206 				mask = 0x3;
207 			} else {
208 				value = 0; /* no fragment */
209 				mask = 0x3;
210 			}
211 		}
212 
213 		err = vcap_rule_add_key_u32(st->vrule,
214 					    VCAP_KF_L3_FRAGMENT_TYPE,
215 					    value, mask);
216 		if (err)
217 			goto out;
218 	}
219 
220 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
221 
222 	return err;
223 
224 out:
225 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
226 	return err;
227 }
228 
229 static int
230 sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
231 {
232 	struct flow_match_ports mt;
233 	u16 value, mask;
234 	int err = 0;
235 
236 	flow_rule_match_ports(st->frule, &mt);
237 
238 	if (mt.mask->src) {
239 		value = be16_to_cpu(mt.key->src);
240 		mask = be16_to_cpu(mt.mask->src);
241 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
242 					    mask);
243 		if (err)
244 			goto out;
245 	}
246 
247 	if (mt.mask->dst) {
248 		value = be16_to_cpu(mt.key->dst);
249 		mask = be16_to_cpu(mt.mask->dst);
250 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
251 					    mask);
252 		if (err)
253 			goto out;
254 	}
255 
256 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
257 
258 	return err;
259 
260 out:
261 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
262 	return err;
263 }
264 
265 static int
266 sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
267 {
268 	struct flow_match_basic mt;
269 	int err = 0;
270 
271 	flow_rule_match_basic(st->frule, &mt);
272 
273 	if (mt.mask->n_proto) {
274 		st->l3_proto = be16_to_cpu(mt.key->n_proto);
275 		if (!sparx5_tc_is_known_etype(st->l3_proto)) {
276 			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
277 						    st->l3_proto, ~0);
278 			if (err)
279 				goto out;
280 		} else if (st->l3_proto == ETH_P_IP) {
281 			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
282 						    VCAP_BIT_1);
283 			if (err)
284 				goto out;
285 		} else if (st->l3_proto == ETH_P_IPV6) {
286 			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
287 						    VCAP_BIT_0);
288 			if (err)
289 				goto out;
290 		}
291 	}
292 
293 	if (mt.mask->ip_proto) {
294 		st->l4_proto = mt.key->ip_proto;
295 		if (st->l4_proto == IPPROTO_TCP) {
296 			err = vcap_rule_add_key_bit(st->vrule,
297 						    VCAP_KF_TCP_IS,
298 						    VCAP_BIT_1);
299 			if (err)
300 				goto out;
301 		} else if (st->l4_proto == IPPROTO_UDP) {
302 			err = vcap_rule_add_key_bit(st->vrule,
303 						    VCAP_KF_TCP_IS,
304 						    VCAP_BIT_0);
305 			if (err)
306 				goto out;
307 		} else {
308 			err = vcap_rule_add_key_u32(st->vrule,
309 						    VCAP_KF_L3_IP_PROTO,
310 						    st->l4_proto, ~0);
311 			if (err)
312 				goto out;
313 		}
314 	}
315 
316 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
317 
318 	return err;
319 
320 out:
321 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
322 	return err;
323 }
324 
325 static int
326 sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
327 {
328 	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
329 	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
330 	struct flow_match_vlan mt;
331 	int err;
332 
333 	flow_rule_match_vlan(st->frule, &mt);
334 
335 	if (mt.mask->vlan_id) {
336 		err = vcap_rule_add_key_u32(st->vrule, vid_key,
337 					    mt.key->vlan_id,
338 					    mt.mask->vlan_id);
339 		if (err)
340 			goto out;
341 	}
342 
343 	if (mt.mask->vlan_priority) {
344 		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
345 					    mt.key->vlan_priority,
346 					    mt.mask->vlan_priority);
347 		if (err)
348 			goto out;
349 	}
350 
351 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
352 
353 	return 0;
354 out:
355 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
356 	return err;
357 }
358 
359 static int
360 sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
361 {
362 	struct flow_match_tcp mt;
363 	u16 tcp_flags_mask;
364 	u16 tcp_flags_key;
365 	enum vcap_bit val;
366 	int err = 0;
367 
368 	flow_rule_match_tcp(st->frule, &mt);
369 	tcp_flags_key = be16_to_cpu(mt.key->flags);
370 	tcp_flags_mask = be16_to_cpu(mt.mask->flags);
371 
372 	if (tcp_flags_mask & TCPHDR_FIN) {
373 		val = VCAP_BIT_0;
374 		if (tcp_flags_key & TCPHDR_FIN)
375 			val = VCAP_BIT_1;
376 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
377 		if (err)
378 			goto out;
379 	}
380 
381 	if (tcp_flags_mask & TCPHDR_SYN) {
382 		val = VCAP_BIT_0;
383 		if (tcp_flags_key & TCPHDR_SYN)
384 			val = VCAP_BIT_1;
385 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
386 		if (err)
387 			goto out;
388 	}
389 
390 	if (tcp_flags_mask & TCPHDR_RST) {
391 		val = VCAP_BIT_0;
392 		if (tcp_flags_key & TCPHDR_RST)
393 			val = VCAP_BIT_1;
394 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
395 		if (err)
396 			goto out;
397 	}
398 
399 	if (tcp_flags_mask & TCPHDR_PSH) {
400 		val = VCAP_BIT_0;
401 		if (tcp_flags_key & TCPHDR_PSH)
402 			val = VCAP_BIT_1;
403 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
404 		if (err)
405 			goto out;
406 	}
407 
408 	if (tcp_flags_mask & TCPHDR_ACK) {
409 		val = VCAP_BIT_0;
410 		if (tcp_flags_key & TCPHDR_ACK)
411 			val = VCAP_BIT_1;
412 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
413 		if (err)
414 			goto out;
415 	}
416 
417 	if (tcp_flags_mask & TCPHDR_URG) {
418 		val = VCAP_BIT_0;
419 		if (tcp_flags_key & TCPHDR_URG)
420 			val = VCAP_BIT_1;
421 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
422 		if (err)
423 			goto out;
424 	}
425 
426 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
427 
428 	return err;
429 
430 out:
431 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
432 	return err;
433 }
434 
/* Translate a flower ARP match into IS2 ARP keys.
 *
 * The TC opcode encoding (request/reply) is remapped to the IS2
 * encoding, which also distinguishes ARP from RARP based on the
 * ethertype seen earlier in the basic handler. Hardware address
 * matches cannot be offloaded and are rejected.
 */
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		/* The IS2 opcode key is two bits wide: request/reply + RARP */
		mask = 0x3;
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		/* sip/tip hold big-endian data in a plain u32, hence the
		 * __force casts before byte swapping
		 */
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}
497 
498 static int
499 sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
500 {
501 	struct flow_match_ip mt;
502 	int err = 0;
503 
504 	flow_rule_match_ip(st->frule, &mt);
505 
506 	if (mt.mask->tos) {
507 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
508 					    mt.key->tos,
509 					    mt.mask->tos);
510 		if (err)
511 			goto out;
512 	}
513 
514 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
515 
516 	return err;
517 
518 out:
519 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
520 	return err;
521 }
522 
/* Dispatch table: one usage handler per supported flower dissector key */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
535 
536 static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
537 				    struct vcap_admin *admin,
538 				    struct vcap_rule *vrule,
539 				    u16 *l3_proto)
540 {
541 	struct sparx5_tc_flower_parse_usage state = {
542 		.fco = fco,
543 		.vrule = vrule,
544 		.l3_proto = ETH_P_ALL,
545 	};
546 	int idx, err = 0;
547 
548 	state.frule = flow_cls_offload_flow_rule(fco);
549 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
550 		if (!flow_rule_match_key(state.frule, idx))
551 			continue;
552 		if (!sparx5_tc_flower_usage_handlers[idx])
553 			continue;
554 		err = sparx5_tc_flower_usage_handlers[idx](&state);
555 		if (err)
556 			return err;
557 	}
558 
559 	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
560 		NL_SET_ERR_MSG_MOD(fco->common.extack,
561 				   "Unsupported match item");
562 		return -ENOENT;
563 	}
564 
565 	if (l3_proto)
566 		*l3_proto = state.l3_proto;
567 	return err;
568 }
569 
570 static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
571 					 struct net_device *ndev,
572 					 struct flow_cls_offload *fco)
573 {
574 	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
575 	struct flow_action_entry *actent, *last_actent = NULL;
576 	struct flow_action *act = &rule->action;
577 	u64 action_mask = 0;
578 	int idx;
579 
580 	if (!flow_action_has_entries(act)) {
581 		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
582 		return -EINVAL;
583 	}
584 
585 	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
586 		return -EOPNOTSUPP;
587 
588 	flow_action_for_each(idx, actent, act) {
589 		if (action_mask & BIT(actent->id)) {
590 			NL_SET_ERR_MSG_MOD(fco->common.extack,
591 					   "More actions of the same type");
592 			return -EINVAL;
593 		}
594 		action_mask |= BIT(actent->id);
595 		last_actent = actent; /* Save last action for later check */
596 	}
597 
598 	/* Check if last action is a goto
599 	 * The last chain/lookup does not need to have a goto action
600 	 */
601 	if (last_actent->id == FLOW_ACTION_GOTO) {
602 		/* Check if the destination chain is in one of the VCAPs */
603 		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
604 					 last_actent->chain_index)) {
605 			NL_SET_ERR_MSG_MOD(fco->common.extack,
606 					   "Invalid goto chain");
607 			return -EINVAL;
608 		}
609 	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
610 		NL_SET_ERR_MSG_MOD(fco->common.extack,
611 				   "Last action must be 'goto'");
612 		return -EINVAL;
613 	}
614 
615 	/* Catch unsupported combinations of actions */
616 	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
617 	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
618 		NL_SET_ERR_MSG_MOD(fco->common.extack,
619 				   "Cannot combine pass and trap action");
620 		return -EOPNOTSUPP;
621 	}
622 
623 	return 0;
624 }
625 
626 /* Add a rule counter action - only IS2 is considered for now */
627 static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
628 				      struct vcap_rule *vrule)
629 {
630 	int err;
631 
632 	err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
633 	if (err)
634 		return err;
635 
636 	vcap_rule_set_counter_id(vrule, vrule->id);
637 	return err;
638 }
639 
/* Collect all port keysets and apply the first of them, possibly wildcarded
 *
 * Intersects the keysets usable by the rule with the keysets configured
 * on the port, grouped per rule size. When several keysets of the same
 * size match, their type ids are combined into a value/mask pair so one
 * wildcarded TYPE key can cover them all; the remaining sizes are left
 * in 'multi' for sparx5_tc_add_remaining_rules() to emit as rule copies.
 */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			/* One slot per rule size (sw_per_item) */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			/* value becomes the AND of all type ids, mask the OR */
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	/* Matching all protocols requires every port keyset to be usable */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}
734 
/* Add one wildcarded copy of an existing rule with a different keyset.
 *
 * The copy keeps only the port/lookup/type keys from the original
 * (everything else is filtered out), gets the new keyset and type id
 * applied, reuses the original actionset, and shares the original's
 * cookie so both are found and removed together on destroy.
 * The local copy is always freed; vcap_add_rule() has taken over
 * ownership of the rule data by the time we get to 'out'.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	/* Keys to retain in the copy; all other keys are dropped */
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	vcap_free_rule(vrule);
	return err;
}
797 
798 static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
799 					 struct flow_cls_offload *fco,
800 					 struct vcap_rule *erule,
801 					 struct vcap_admin *admin,
802 					 struct sparx5_multiple_rules *multi)
803 {
804 	int idx, err = 0;
805 
806 	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
807 		if (!multi->rule[idx].selected)
808 			continue;
809 
810 		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
811 					      &multi->rule[idx]);
812 		if (err)
813 			break;
814 	}
815 	return err;
816 }
817 
818 static int sparx5_tc_flower_replace(struct net_device *ndev,
819 				    struct flow_cls_offload *fco,
820 				    struct vcap_admin *admin)
821 {
822 	struct sparx5_port *port = netdev_priv(ndev);
823 	struct sparx5_multiple_rules multi = {};
824 	struct flow_action_entry *act;
825 	struct vcap_control *vctrl;
826 	struct flow_rule *frule;
827 	struct vcap_rule *vrule;
828 	u16 l3_proto;
829 	int err, idx;
830 
831 	vctrl = port->sparx5->vcap_ctrl;
832 
833 	err = sparx5_tc_flower_action_check(vctrl, ndev, fco);
834 	if (err)
835 		return err;
836 
837 	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
838 				fco->common.prio, 0);
839 	if (IS_ERR(vrule))
840 		return PTR_ERR(vrule);
841 
842 	vrule->cookie = fco->cookie;
843 
844 	l3_proto = ETH_P_ALL;
845 	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
846 	if (err)
847 		goto out;
848 
849 	err = sparx5_tc_add_rule_counter(admin, vrule);
850 	if (err)
851 		goto out;
852 
853 	frule = flow_cls_offload_flow_rule(fco);
854 	flow_action_for_each(idx, act, &frule->action) {
855 		switch (act->id) {
856 		case FLOW_ACTION_TRAP:
857 			err = vcap_rule_add_action_bit(vrule,
858 						       VCAP_AF_CPU_COPY_ENA,
859 						       VCAP_BIT_1);
860 			if (err)
861 				goto out;
862 			err = vcap_rule_add_action_u32(vrule,
863 						       VCAP_AF_CPU_QUEUE_NUM, 0);
864 			if (err)
865 				goto out;
866 			err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
867 						       SPX5_PMM_REPLACE_ALL);
868 			if (err)
869 				goto out;
870 			/* For now the actionset is hardcoded */
871 			err = vcap_set_rule_set_actionset(vrule,
872 							  VCAP_AFS_BASE_TYPE);
873 			if (err)
874 				goto out;
875 			break;
876 		case FLOW_ACTION_ACCEPT:
877 			/* For now the actionset is hardcoded */
878 			err = vcap_set_rule_set_actionset(vrule,
879 							  VCAP_AFS_BASE_TYPE);
880 			if (err)
881 				goto out;
882 			break;
883 		case FLOW_ACTION_GOTO:
884 			/* Links between VCAPs will be added later */
885 			break;
886 		default:
887 			NL_SET_ERR_MSG_MOD(fco->common.extack,
888 					   "Unsupported TC action");
889 			err = -EOPNOTSUPP;
890 			goto out;
891 		}
892 	}
893 
894 	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
895 					       &multi);
896 	if (err) {
897 		NL_SET_ERR_MSG_MOD(fco->common.extack,
898 				   "No matching port keyset for filter protocol and keys");
899 		goto out;
900 	}
901 
902 	/* provide the l3 protocol to guide the keyset selection */
903 	err = vcap_val_rule(vrule, l3_proto);
904 	if (err) {
905 		vcap_set_tc_exterr(fco, vrule);
906 		goto out;
907 	}
908 	err = vcap_add_rule(vrule);
909 	if (err)
910 		NL_SET_ERR_MSG_MOD(fco->common.extack,
911 				   "Could not add the filter");
912 
913 	if (l3_proto == ETH_P_ALL)
914 		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
915 						    &multi);
916 
917 out:
918 	vcap_free_rule(vrule);
919 	return err;
920 }
921 
922 static int sparx5_tc_flower_destroy(struct net_device *ndev,
923 				    struct flow_cls_offload *fco,
924 				    struct vcap_admin *admin)
925 {
926 	struct sparx5_port *port = netdev_priv(ndev);
927 	struct vcap_control *vctrl;
928 	int err = -ENOENT, rule_id;
929 
930 	vctrl = port->sparx5->vcap_ctrl;
931 	while (true) {
932 		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
933 		if (rule_id <= 0)
934 			break;
935 		err = vcap_del_rule(vctrl, ndev, rule_id);
936 		if (err) {
937 			pr_err("%s:%d: could not delete rule %d\n",
938 			       __func__, __LINE__, rule_id);
939 			break;
940 		}
941 	}
942 	return err;
943 }
944 
945 static int sparx5_tc_flower_stats(struct net_device *ndev,
946 				  struct flow_cls_offload *fco,
947 				  struct vcap_admin *admin)
948 {
949 	struct sparx5_port *port = netdev_priv(ndev);
950 	struct vcap_counter ctr = {};
951 	struct vcap_control *vctrl;
952 	ulong lastused = 0;
953 	int err;
954 
955 	vctrl = port->sparx5->vcap_ctrl;
956 	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
957 	if (err)
958 		return err;
959 	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
960 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
961 	return err;
962 }
963 
964 int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
965 		     bool ingress)
966 {
967 	struct sparx5_port *port = netdev_priv(ndev);
968 	struct vcap_control *vctrl;
969 	struct vcap_admin *admin;
970 	int err = -EINVAL;
971 
972 	/* Get vcap instance from the chain id */
973 	vctrl = port->sparx5->vcap_ctrl;
974 	admin = vcap_find_admin(vctrl, fco->common.chain_index);
975 	if (!admin) {
976 		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
977 		return err;
978 	}
979 
980 	switch (fco->command) {
981 	case FLOW_CLS_REPLACE:
982 		return sparx5_tc_flower_replace(ndev, fco, admin);
983 	case FLOW_CLS_DESTROY:
984 		return sparx5_tc_flower_destroy(ndev, fco, admin);
985 	case FLOW_CLS_STATS:
986 		return sparx5_tc_flower_stats(ndev, fco, admin);
987 	default:
988 		return -EOPNOTSUPP;
989 	}
990 }
991