1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip VCAP API
3  *
4  * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <net/tcp.h>
8 
9 #include "sparx5_tc.h"
10 #include "vcap_api.h"
11 #include "vcap_api_client.h"
12 #include "sparx5_main.h"
13 #include "sparx5_vcap_impl.h"
14 
/* Maximum rule size in subwords; sizes the per-rule-size table below */
#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;			/* a keyset of this rule size matched */
	u8 value;			/* merged type id value of the keysets */
	u8 mask;			/* merged type id mask of the keysets */
	enum vcap_keyfield_set keyset;	/* first matching keyset of this size */
};

/* One candidate wildcard rule per possible rule size (in subwords) */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

/* State shared between the TC dissector usage handlers while parsing one
 * flower filter into a VCAP rule
 */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;	/* the TC offload request */
	struct flow_rule *frule;	/* flow rule extracted from fco */
	struct vcap_rule *vrule;	/* VCAP rule being built */
	struct vcap_admin *admin;	/* VCAP instance the rule targets */
	u16 l3_proto;			/* ethertype from the basic dissector */
	u8 l4_proto;			/* ip_proto from the basic dissector */
	unsigned int used_keys;		/* dissector keys handled so far */
};
38 
/* These protocols have dedicated keysets in IS2 and a TC dissector
 * ETH_P_ARP does not have a TC dissector
 */
static u16 sparx5_tc_known_etypes[] = {
	ETH_P_ALL,
	ETH_P_ARP,
	ETH_P_IP,
	ETH_P_IPV6,
};

/* ARP opcode encoding used by the IS2 ARP keyset */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

/* ARP opcode values as delivered by the TC flower ARP dissector */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};
61 
62 static bool sparx5_tc_is_known_etype(u16 etype)
63 {
64 	int idx;
65 
66 	/* For now this only knows about IS2 traffic classification */
67 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
68 		if (sparx5_tc_known_etypes[idx] == etype)
69 			return true;
70 
71 	return false;
72 }
73 
74 static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
75 {
76 	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
77 	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
78 	struct flow_match_eth_addrs match;
79 	struct vcap_u48_key smac, dmac;
80 	int err = 0;
81 
82 	flow_rule_match_eth_addrs(st->frule, &match);
83 
84 	if (!is_zero_ether_addr(match.mask->src)) {
85 		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
86 		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
87 		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
88 		if (err)
89 			goto out;
90 	}
91 
92 	if (!is_zero_ether_addr(match.mask->dst)) {
93 		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
94 		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
95 		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
96 		if (err)
97 			goto out;
98 	}
99 
100 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
101 
102 	return err;
103 
104 out:
105 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
106 	return err;
107 }
108 
109 static int
110 sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
111 {
112 	int err = 0;
113 
114 	if (st->l3_proto == ETH_P_IP) {
115 		struct flow_match_ipv4_addrs mt;
116 
117 		flow_rule_match_ipv4_addrs(st->frule, &mt);
118 		if (mt.mask->src) {
119 			err = vcap_rule_add_key_u32(st->vrule,
120 						    VCAP_KF_L3_IP4_SIP,
121 						    be32_to_cpu(mt.key->src),
122 						    be32_to_cpu(mt.mask->src));
123 			if (err)
124 				goto out;
125 		}
126 		if (mt.mask->dst) {
127 			err = vcap_rule_add_key_u32(st->vrule,
128 						    VCAP_KF_L3_IP4_DIP,
129 						    be32_to_cpu(mt.key->dst),
130 						    be32_to_cpu(mt.mask->dst));
131 			if (err)
132 				goto out;
133 		}
134 	}
135 
136 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
137 
138 	return err;
139 
140 out:
141 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
142 	return err;
143 }
144 
145 static int
146 sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
147 {
148 	int err = 0;
149 
150 	if (st->l3_proto == ETH_P_IPV6) {
151 		struct flow_match_ipv6_addrs mt;
152 		struct vcap_u128_key sip;
153 		struct vcap_u128_key dip;
154 
155 		flow_rule_match_ipv6_addrs(st->frule, &mt);
156 		/* Check if address masks are non-zero */
157 		if (!ipv6_addr_any(&mt.mask->src)) {
158 			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
159 			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
160 			err = vcap_rule_add_key_u128(st->vrule,
161 						     VCAP_KF_L3_IP6_SIP, &sip);
162 			if (err)
163 				goto out;
164 		}
165 		if (!ipv6_addr_any(&mt.mask->dst)) {
166 			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
167 			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
168 			err = vcap_rule_add_key_u128(st->vrule,
169 						     VCAP_KF_L3_IP6_DIP, &dip);
170 			if (err)
171 				goto out;
172 		}
173 	}
174 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
175 	return err;
176 out:
177 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
178 	return err;
179 }
180 
181 static int
182 sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
183 {
184 	struct flow_match_control mt;
185 	u32 value, mask;
186 	int err = 0;
187 
188 	flow_rule_match_control(st->frule, &mt);
189 
190 	if (mt.mask->flags) {
191 		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
192 			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
193 				value = 1; /* initial fragment */
194 				mask = 0x3;
195 			} else {
196 				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
197 					value = 3; /* follow up fragment */
198 					mask = 0x3;
199 				} else {
200 					value = 0; /* no fragment */
201 					mask = 0x3;
202 				}
203 			}
204 		} else {
205 			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
206 				value = 3; /* follow up fragment */
207 				mask = 0x3;
208 			} else {
209 				value = 0; /* no fragment */
210 				mask = 0x3;
211 			}
212 		}
213 
214 		err = vcap_rule_add_key_u32(st->vrule,
215 					    VCAP_KF_L3_FRAGMENT_TYPE,
216 					    value, mask);
217 		if (err)
218 			goto out;
219 	}
220 
221 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
222 
223 	return err;
224 
225 out:
226 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
227 	return err;
228 }
229 
230 static int
231 sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
232 {
233 	struct flow_match_ports mt;
234 	u16 value, mask;
235 	int err = 0;
236 
237 	flow_rule_match_ports(st->frule, &mt);
238 
239 	if (mt.mask->src) {
240 		value = be16_to_cpu(mt.key->src);
241 		mask = be16_to_cpu(mt.mask->src);
242 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
243 					    mask);
244 		if (err)
245 			goto out;
246 	}
247 
248 	if (mt.mask->dst) {
249 		value = be16_to_cpu(mt.key->dst);
250 		mask = be16_to_cpu(mt.mask->dst);
251 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
252 					    mask);
253 		if (err)
254 			goto out;
255 	}
256 
257 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
258 
259 	return err;
260 
261 out:
262 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
263 	return err;
264 }
265 
/* Handle the TC basic dissector: match on ethertype and/or IP protocol.
 * Also records the L3 and L4 protocol in the parser state so that later
 * handlers (ipv4/ipv6 addresses) and keyset selection can use them.
 */
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_tc_is_known_etype(st->l3_proto)) {
			/* Ethertypes without a dedicated keyset are matched
			 * on the raw ETYPE value (exact match, mask ~0)
			 */
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			/* Known IP ethertypes use the IP4_IS bit instead */
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			/* NOTE(review): TCP_UDP_IS is only added for UDP on
			 * IS0; the TCP branch above does not add it — confirm
			 * this asymmetry is intended for the IS0 keysets
			 */
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			/* Other protocols are matched on the raw proto value */
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}
332 
333 static int
334 sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
335 {
336 	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
337 	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
338 	struct flow_match_vlan mt;
339 	int err;
340 
341 	flow_rule_match_vlan(st->frule, &mt);
342 
343 	if (mt.mask->vlan_id) {
344 		err = vcap_rule_add_key_u32(st->vrule, vid_key,
345 					    mt.key->vlan_id,
346 					    mt.mask->vlan_id);
347 		if (err)
348 			goto out;
349 	}
350 
351 	if (mt.mask->vlan_priority) {
352 		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
353 					    mt.key->vlan_priority,
354 					    mt.mask->vlan_priority);
355 		if (err)
356 			goto out;
357 	}
358 
359 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
360 
361 	return 0;
362 out:
363 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
364 	return err;
365 }
366 
367 static int
368 sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
369 {
370 	struct flow_match_tcp mt;
371 	u16 tcp_flags_mask;
372 	u16 tcp_flags_key;
373 	enum vcap_bit val;
374 	int err = 0;
375 
376 	flow_rule_match_tcp(st->frule, &mt);
377 	tcp_flags_key = be16_to_cpu(mt.key->flags);
378 	tcp_flags_mask = be16_to_cpu(mt.mask->flags);
379 
380 	if (tcp_flags_mask & TCPHDR_FIN) {
381 		val = VCAP_BIT_0;
382 		if (tcp_flags_key & TCPHDR_FIN)
383 			val = VCAP_BIT_1;
384 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
385 		if (err)
386 			goto out;
387 	}
388 
389 	if (tcp_flags_mask & TCPHDR_SYN) {
390 		val = VCAP_BIT_0;
391 		if (tcp_flags_key & TCPHDR_SYN)
392 			val = VCAP_BIT_1;
393 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
394 		if (err)
395 			goto out;
396 	}
397 
398 	if (tcp_flags_mask & TCPHDR_RST) {
399 		val = VCAP_BIT_0;
400 		if (tcp_flags_key & TCPHDR_RST)
401 			val = VCAP_BIT_1;
402 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
403 		if (err)
404 			goto out;
405 	}
406 
407 	if (tcp_flags_mask & TCPHDR_PSH) {
408 		val = VCAP_BIT_0;
409 		if (tcp_flags_key & TCPHDR_PSH)
410 			val = VCAP_BIT_1;
411 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
412 		if (err)
413 			goto out;
414 	}
415 
416 	if (tcp_flags_mask & TCPHDR_ACK) {
417 		val = VCAP_BIT_0;
418 		if (tcp_flags_key & TCPHDR_ACK)
419 			val = VCAP_BIT_1;
420 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
421 		if (err)
422 			goto out;
423 	}
424 
425 	if (tcp_flags_mask & TCPHDR_URG) {
426 		val = VCAP_BIT_0;
427 		if (tcp_flags_key & TCPHDR_URG)
428 			val = VCAP_BIT_1;
429 		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
430 		if (err)
431 			goto out;
432 	}
433 
434 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
435 
436 	return err;
437 
438 out:
439 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
440 	return err;
441 }
442 
443 static int
444 sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
445 {
446 	struct flow_match_arp mt;
447 	u16 value, mask;
448 	u32 ipval, ipmsk;
449 	int err;
450 
451 	flow_rule_match_arp(st->frule, &mt);
452 
453 	if (mt.mask->op) {
454 		mask = 0x3;
455 		if (st->l3_proto == ETH_P_ARP) {
456 			value = mt.key->op == TC_ARP_OP_REQUEST ?
457 					SPX5_IS2_ARP_REQUEST :
458 					SPX5_IS2_ARP_REPLY;
459 		} else { /* RARP */
460 			value = mt.key->op == TC_ARP_OP_REQUEST ?
461 					SPX5_IS2_RARP_REQUEST :
462 					SPX5_IS2_RARP_REPLY;
463 		}
464 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
465 					    value, mask);
466 		if (err)
467 			goto out;
468 	}
469 
470 	/* The IS2 ARP keyset does not support ARP hardware addresses */
471 	if (!is_zero_ether_addr(mt.mask->sha) ||
472 	    !is_zero_ether_addr(mt.mask->tha)) {
473 		err = -EINVAL;
474 		goto out;
475 	}
476 
477 	if (mt.mask->sip) {
478 		ipval = be32_to_cpu((__force __be32)mt.key->sip);
479 		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
480 
481 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
482 					    ipval, ipmsk);
483 		if (err)
484 			goto out;
485 	}
486 
487 	if (mt.mask->tip) {
488 		ipval = be32_to_cpu((__force __be32)mt.key->tip);
489 		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
490 
491 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
492 					    ipval, ipmsk);
493 		if (err)
494 			goto out;
495 	}
496 
497 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
498 
499 	return 0;
500 
501 out:
502 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
503 	return err;
504 }
505 
506 static int
507 sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
508 {
509 	struct flow_match_ip mt;
510 	int err = 0;
511 
512 	flow_rule_match_ip(st->frule, &mt);
513 
514 	if (mt.mask->tos) {
515 		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
516 					    mt.key->tos,
517 					    mt.mask->tos);
518 		if (err)
519 			goto out;
520 	}
521 
522 	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
523 
524 	return err;
525 
526 out:
527 	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
528 	return err;
529 }
530 
/* Dispatch table: one parser per supported TC flower dissector key.
 * Indexed by FLOW_DISSECTOR_KEY_* id; unsupported keys are left NULL
 * and skipped in sparx5_tc_use_dissectors().
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
543 
544 static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
545 				    struct vcap_admin *admin,
546 				    struct vcap_rule *vrule,
547 				    u16 *l3_proto)
548 {
549 	struct sparx5_tc_flower_parse_usage state = {
550 		.fco = fco,
551 		.vrule = vrule,
552 		.l3_proto = ETH_P_ALL,
553 		.admin = admin,
554 	};
555 	int idx, err = 0;
556 
557 	state.frule = flow_cls_offload_flow_rule(fco);
558 	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
559 		if (!flow_rule_match_key(state.frule, idx))
560 			continue;
561 		if (!sparx5_tc_flower_usage_handlers[idx])
562 			continue;
563 		err = sparx5_tc_flower_usage_handlers[idx](&state);
564 		if (err)
565 			return err;
566 	}
567 
568 	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
569 		NL_SET_ERR_MSG_MOD(fco->common.extack,
570 				   "Unsupported match item");
571 		return -ENOENT;
572 	}
573 
574 	if (l3_proto)
575 		*l3_proto = state.l3_proto;
576 	return err;
577 }
578 
579 static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
580 					 struct net_device *ndev,
581 					 struct flow_cls_offload *fco)
582 {
583 	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
584 	struct flow_action_entry *actent, *last_actent = NULL;
585 	struct flow_action *act = &rule->action;
586 	u64 action_mask = 0;
587 	int idx;
588 
589 	if (!flow_action_has_entries(act)) {
590 		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
591 		return -EINVAL;
592 	}
593 
594 	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
595 		return -EOPNOTSUPP;
596 
597 	flow_action_for_each(idx, actent, act) {
598 		if (action_mask & BIT(actent->id)) {
599 			NL_SET_ERR_MSG_MOD(fco->common.extack,
600 					   "More actions of the same type");
601 			return -EINVAL;
602 		}
603 		action_mask |= BIT(actent->id);
604 		last_actent = actent; /* Save last action for later check */
605 	}
606 
607 	/* Check if last action is a goto
608 	 * The last chain/lookup does not need to have a goto action
609 	 */
610 	if (last_actent->id == FLOW_ACTION_GOTO) {
611 		/* Check if the destination chain is in one of the VCAPs */
612 		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
613 					 last_actent->chain_index)) {
614 			NL_SET_ERR_MSG_MOD(fco->common.extack,
615 					   "Invalid goto chain");
616 			return -EINVAL;
617 		}
618 	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
619 		NL_SET_ERR_MSG_MOD(fco->common.extack,
620 				   "Last action must be 'goto'");
621 		return -EINVAL;
622 	}
623 
624 	/* Catch unsupported combinations of actions */
625 	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
626 	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
627 		NL_SET_ERR_MSG_MOD(fco->common.extack,
628 				   "Cannot combine pass and trap action");
629 		return -EOPNOTSUPP;
630 	}
631 
632 	return 0;
633 }
634 
635 /* Add a rule counter action */
636 static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
637 				      struct vcap_rule *vrule)
638 {
639 	int err;
640 
641 	if (admin->vtype == VCAP_TYPE_IS2) {
642 		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
643 					       vrule->id);
644 		if (err)
645 			return err;
646 		vcap_rule_set_counter_id(vrule, vrule->id);
647 	}
648 
649 	return 0;
650 }
651 
/* Collect all port keysets and apply the first of them, possibly wildcarded
 *
 * Intersects the keysets that can hold the rule's keys with the keysets
 * enabled in the port configuration.  Keysets of the same rule size are
 * merged into one value/mask over their type ids so a single wildcarded
 * rule can cover them.  The first matching size is applied to @vrule here;
 * the remaining sizes stay marked in @multi for later rule copies.
 */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			/* Group keysets by rule size (subwords per item) */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			/* AND the values, OR the masks across the keysets of
			 * this size to find the bits they have in common
			 */
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	/* An ETH_P_ALL filter must be expressible in every port keyset */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}
746 
/* Duplicate an existing rule under a different keyset so the same TC filter
 * also matches frames classified into that keyset.  The copy keeps the
 * original's cookie so it is found and deleted together with it.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	/* Keys kept from the original rule; all others are filtered out */
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	/* The local copy is freed on both success and error paths —
	 * presumably vcap_add_rule() keeps its own copy of the rule;
	 * NOTE(review): confirm against the vcap_api implementation
	 */
	vcap_free_rule(vrule);
	return err;
}
809 
810 static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
811 					 struct flow_cls_offload *fco,
812 					 struct vcap_rule *erule,
813 					 struct vcap_admin *admin,
814 					 struct sparx5_multiple_rules *multi)
815 {
816 	int idx, err = 0;
817 
818 	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
819 		if (!multi->rule[idx].selected)
820 			continue;
821 
822 		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
823 					      &multi->rule[idx]);
824 		if (err)
825 			break;
826 	}
827 	return err;
828 }
829 
830 /* Add the actionset that is the default for the VCAP type */
831 static int sparx5_tc_set_actionset(struct vcap_admin *admin,
832 				   struct vcap_rule *vrule)
833 {
834 	enum vcap_actionfield_set aset;
835 	int err = 0;
836 
837 	switch (admin->vtype) {
838 	case VCAP_TYPE_IS0:
839 		aset = VCAP_AFS_CLASSIFICATION;
840 		break;
841 	case VCAP_TYPE_IS2:
842 		aset = VCAP_AFS_BASE_TYPE;
843 		break;
844 	default:
845 		return -EINVAL;
846 	}
847 	/* Do not overwrite any current actionset */
848 	if (vrule->actionset == VCAP_AFS_NO_VALUE)
849 		err = vcap_set_rule_set_actionset(vrule, aset);
850 	return err;
851 }
852 
853 /* Add the VCAP key to match on for a rule target value */
854 static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
855 					  struct vcap_rule *vrule,
856 					  int target_cid)
857 {
858 	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
859 	int err;
860 
861 	if (!link_val)
862 		return 0;
863 
864 	switch (admin->vtype) {
865 	case VCAP_TYPE_IS0:
866 		/* Add NXT_IDX key for chaining rules between IS0 instances */
867 		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
868 					    1, /* enable */
869 					    ~0);
870 		if (err)
871 			return err;
872 		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
873 					     link_val, /* target */
874 					     ~0);
875 	case VCAP_TYPE_IS2:
876 		/* Add PAG key for chaining rules from IS0 */
877 		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
878 					     link_val, /* target */
879 					     ~0);
880 	default:
881 		break;
882 	}
883 	return 0;
884 }
885 
886 /* Add the VCAP action that adds a target value to a rule */
887 static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
888 				   struct vcap_admin *admin,
889 				   struct vcap_rule *vrule,
890 				   int from_cid, int to_cid)
891 {
892 	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
893 	int diff, err = 0;
894 
895 	diff = vcap_chain_offset(vctrl, from_cid, to_cid);
896 	if (!(to_admin && diff > 0)) {
897 		pr_err("%s:%d: unsupported chain direction: %d\n",
898 		       __func__, __LINE__, to_cid);
899 		return -EINVAL;
900 	}
901 	if (admin->vtype == VCAP_TYPE_IS0 &&
902 	    to_admin->vtype == VCAP_TYPE_IS0) {
903 		/* Between IS0 instances the G_IDX value is used */
904 		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
905 		if (err)
906 			goto out;
907 		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
908 					       1); /* Replace */
909 		if (err)
910 			goto out;
911 	} else if (admin->vtype == VCAP_TYPE_IS0 &&
912 		   to_admin->vtype == VCAP_TYPE_IS2) {
913 		/* Between IS0 and IS2 the PAG value is used */
914 		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
915 		if (err)
916 			goto out;
917 		err = vcap_rule_add_action_u32(vrule,
918 					       VCAP_AF_PAG_OVERRIDE_MASK,
919 					       0xff);
920 		if (err)
921 			goto out;
922 	} else {
923 		pr_err("%s:%d: unsupported chain destination: %d\n",
924 		       __func__, __LINE__, to_cid);
925 		err = -EOPNOTSUPP;
926 	}
927 out:
928 	return err;
929 }
930 
931 static int sparx5_tc_flower_replace(struct net_device *ndev,
932 				    struct flow_cls_offload *fco,
933 				    struct vcap_admin *admin)
934 {
935 	struct sparx5_port *port = netdev_priv(ndev);
936 	struct sparx5_multiple_rules multi = {};
937 	struct flow_action_entry *act;
938 	struct vcap_control *vctrl;
939 	struct flow_rule *frule;
940 	struct vcap_rule *vrule;
941 	u16 l3_proto;
942 	int err, idx;
943 
944 	vctrl = port->sparx5->vcap_ctrl;
945 
946 	err = sparx5_tc_flower_action_check(vctrl, ndev, fco);
947 	if (err)
948 		return err;
949 
950 	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
951 				fco->common.prio, 0);
952 	if (IS_ERR(vrule))
953 		return PTR_ERR(vrule);
954 
955 	vrule->cookie = fco->cookie;
956 
957 	l3_proto = ETH_P_ALL;
958 	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
959 	if (err)
960 		goto out;
961 
962 	err = sparx5_tc_add_rule_counter(admin, vrule);
963 	if (err)
964 		goto out;
965 
966 	err = sparx5_tc_add_rule_link_target(admin, vrule,
967 					     fco->common.chain_index);
968 	if (err)
969 		goto out;
970 
971 	frule = flow_cls_offload_flow_rule(fco);
972 	flow_action_for_each(idx, act, &frule->action) {
973 		switch (act->id) {
974 		case FLOW_ACTION_TRAP:
975 			if (admin->vtype != VCAP_TYPE_IS2) {
976 				NL_SET_ERR_MSG_MOD(fco->common.extack,
977 						   "Trap action not supported in this VCAP");
978 				err = -EOPNOTSUPP;
979 				goto out;
980 			}
981 			err = vcap_rule_add_action_bit(vrule,
982 						       VCAP_AF_CPU_COPY_ENA,
983 						       VCAP_BIT_1);
984 			if (err)
985 				goto out;
986 			err = vcap_rule_add_action_u32(vrule,
987 						       VCAP_AF_CPU_QUEUE_NUM, 0);
988 			if (err)
989 				goto out;
990 			err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
991 						       SPX5_PMM_REPLACE_ALL);
992 			if (err)
993 				goto out;
994 			break;
995 		case FLOW_ACTION_ACCEPT:
996 			err = sparx5_tc_set_actionset(admin, vrule);
997 			if (err)
998 				goto out;
999 			break;
1000 		case FLOW_ACTION_GOTO:
1001 			err = sparx5_tc_set_actionset(admin, vrule);
1002 			if (err)
1003 				goto out;
1004 			sparx5_tc_add_rule_link(vctrl, admin, vrule,
1005 						fco->common.chain_index,
1006 						act->chain_index);
1007 			break;
1008 		default:
1009 			NL_SET_ERR_MSG_MOD(fco->common.extack,
1010 					   "Unsupported TC action");
1011 			err = -EOPNOTSUPP;
1012 			goto out;
1013 		}
1014 	}
1015 
1016 	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
1017 					       &multi);
1018 	if (err) {
1019 		NL_SET_ERR_MSG_MOD(fco->common.extack,
1020 				   "No matching port keyset for filter protocol and keys");
1021 		goto out;
1022 	}
1023 
1024 	/* provide the l3 protocol to guide the keyset selection */
1025 	err = vcap_val_rule(vrule, l3_proto);
1026 	if (err) {
1027 		vcap_set_tc_exterr(fco, vrule);
1028 		goto out;
1029 	}
1030 	err = vcap_add_rule(vrule);
1031 	if (err)
1032 		NL_SET_ERR_MSG_MOD(fco->common.extack,
1033 				   "Could not add the filter");
1034 
1035 	if (l3_proto == ETH_P_ALL)
1036 		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
1037 						    &multi);
1038 
1039 out:
1040 	vcap_free_rule(vrule);
1041 	return err;
1042 }
1043 
1044 static int sparx5_tc_flower_destroy(struct net_device *ndev,
1045 				    struct flow_cls_offload *fco,
1046 				    struct vcap_admin *admin)
1047 {
1048 	struct sparx5_port *port = netdev_priv(ndev);
1049 	struct vcap_control *vctrl;
1050 	int err = -ENOENT, rule_id;
1051 
1052 	vctrl = port->sparx5->vcap_ctrl;
1053 	while (true) {
1054 		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
1055 		if (rule_id <= 0)
1056 			break;
1057 		err = vcap_del_rule(vctrl, ndev, rule_id);
1058 		if (err) {
1059 			pr_err("%s:%d: could not delete rule %d\n",
1060 			       __func__, __LINE__, rule_id);
1061 			break;
1062 		}
1063 	}
1064 	return err;
1065 }
1066 
1067 static int sparx5_tc_flower_stats(struct net_device *ndev,
1068 				  struct flow_cls_offload *fco,
1069 				  struct vcap_admin *admin)
1070 {
1071 	struct sparx5_port *port = netdev_priv(ndev);
1072 	struct vcap_counter ctr = {};
1073 	struct vcap_control *vctrl;
1074 	ulong lastused = 0;
1075 	int err;
1076 
1077 	vctrl = port->sparx5->vcap_ctrl;
1078 	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
1079 	if (err)
1080 		return err;
1081 	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
1082 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
1083 	return err;
1084 }
1085 
1086 int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
1087 		     bool ingress)
1088 {
1089 	struct sparx5_port *port = netdev_priv(ndev);
1090 	struct vcap_control *vctrl;
1091 	struct vcap_admin *admin;
1092 	int err = -EINVAL;
1093 
1094 	/* Get vcap instance from the chain id */
1095 	vctrl = port->sparx5->vcap_ctrl;
1096 	admin = vcap_find_admin(vctrl, fco->common.chain_index);
1097 	if (!admin) {
1098 		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
1099 		return err;
1100 	}
1101 
1102 	switch (fco->command) {
1103 	case FLOW_CLS_REPLACE:
1104 		return sparx5_tc_flower_replace(ndev, fco, admin);
1105 	case FLOW_CLS_DESTROY:
1106 		return sparx5_tc_flower_destroy(ndev, fco, admin);
1107 	case FLOW_CLS_STATS:
1108 		return sparx5_tc_flower_stats(ndev, fco, admin);
1109 	default:
1110 		return -EOPNOTSUPP;
1111 	}
1112 }
1113