// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tcp.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

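/* Parser state shared by the flower key handlers while a TC filter is
 * translated into a VCAP rule
 */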
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	u16 l3_proto;
	u8 l4_proto;
	unsigned int used_keys;
};

/* These protocols have dedicated keysets in IS2 and a TC dissector
 * ETH_P_ARP does not have a TC dissector
 */
static u16 sparx5_tc_known_etypes[] = {
	ETH_P_ALL,
	ETH_P_IP,
	ETH_P_IPV6,
};

static bool sparx5_tc_is_known_etype(u16 etype)
{
	int idx;

	/* For now this only knows about IS2 traffic classification */
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
		if (sparx5_tc_known_etypes[idx] == etype)
			return true;

	return false;
}

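/* Add the source and destination MAC address matches as L2 SMAC/DMAC keys */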
static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
	struct flow_match_eth_addrs match;
	struct vcap_u48_key smac, dmac;
	int err = 0;

	flow_rule_match_eth_addrs(st->frule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
		if (err)
			goto out;
	}

	if (!is_zero_ether_addr(match.mask->dst)) {
		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
	return err;
}

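/* Add the IPv4 source and destination address matches as L3 SIP/DIP keys */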
static int
sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IP) {
		struct flow_match_ipv4_addrs mt;

		flow_rule_match_ipv4_addrs(st->frule, &mt);
		if (mt.mask->src) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_SIP,
						    be32_to_cpu(mt.key->src),
						    be32_to_cpu(mt.mask->src));
			if (err)
				goto out;
		}
		if (mt.mask->dst) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_DIP,
						    be32_to_cpu(mt.key->dst),
						    be32_to_cpu(mt.mask->dst));
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
	return err;
}

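/* Add the IPv6 source and destination address matches as L3 SIP/DIP keys */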
static int
sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}
	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}

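/* Translate the fragment flags into the L3 fragment type key */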
static int
sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		mask = 0x3;
		if ((mt.mask->flags & FLOW_DIS_FIRST_FRAG) &&
		    (mt.key->flags & FLOW_DIS_FIRST_FRAG))
			value = 1; /* initial fragment */
		else if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT)
			value = 3; /* follow-up fragment */
		else
			value = 0; /* no fragment */

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
	return err;
}

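/* Add the L4 source and destination port matches */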
static int
sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ports mt;
	u16 value, mask;
	int err = 0;

	flow_rule_match_ports(st->frule, &mt);

	if (mt.mask->src) {
		value = be16_to_cpu(mt.key->src);
		mask = be16_to_cpu(mt.mask->src);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
					    mask);
		if (err)
			goto out;
	}

	if (mt.mask->dst) {
		value = be16_to_cpu(mt.key->dst);
		mask = be16_to_cpu(mt.mask->dst);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
					    mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
	return err;
}

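/* Add ethertype and IP protocol selection keys from the basic match */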
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_tc_is_known_etype(st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
		} else {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}

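/* Add the classified VLAN id and priority matches */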
static int
sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
	struct flow_match_vlan mt;
	int err = 0;

	flow_rule_match_vlan(st->frule, &mt);

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);

	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
	return err;
}

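/* Add individual L4 flag keys from the TCP flags match */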
static int
sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_tcp mt;
	u16 tcp_flags_mask;
	u16 tcp_flags_key;
	enum vcap_bit val;
	int err = 0;

	flow_rule_match_tcp(st->frule, &mt);
	tcp_flags_key = be16_to_cpu(mt.key->flags);
	tcp_flags_mask = be16_to_cpu(mt.mask->flags);

	if (tcp_flags_mask & TCPHDR_FIN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_FIN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_SYN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_SYN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_RST) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_RST)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_PSH) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_PSH)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_ACK) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_ACK)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_URG) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_URG)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
	return err;
}

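/* Add the IP TOS match as the L3_TOS key */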
static int
sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ip mt;
	int err = 0;

	flow_rule_match_ip(st->frule, &mt);

	if (mt.mask->tos) {
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
					    mt.key->tos,
					    mt.mask->tos);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
	return err;
}

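/* Handlers for the supported flower dissector keys, indexed by key id */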
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};

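/* Run the handler for each dissector key used by the filter and verify that
 * all used keys were handled
 */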
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct sparx5_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}

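/* Sanity check the TC actions: each action type may only be used once, the
 * last action must be a goto to the next VCAP lookup, and trap cannot be
 * combined with accept
 */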
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_admin *admin)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check that last action is a goto */
	if (last_actent->id != FLOW_ACTION_GOTO) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Check if the goto chain is in the next lookup */
	if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
				 last_actent->chain_index)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid goto chain");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}

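/* Add a new TC flower filter: translate the matches and actions into a VCAP
 * rule and install it in the VCAP instance
 */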
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	u16 l3_proto;
	int err, idx;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, fco, admin);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	vrule->cookie = fco->cookie;
	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
	if (err)
		goto out;
	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_TRAP:
			err = vcap_rule_add_action_bit(vrule,
						       VCAP_AF_CPU_COPY_ENA,
						       VCAP_BIT_1);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_CPU_QUEUE_NUM, 0);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
						       SPX5_PMM_REPLACE_ALL);
			if (err)
				goto out;
			/* For now the actionset is hardcoded */
			err = vcap_set_rule_set_actionset(vrule,
							  VCAP_AFS_BASE_TYPE);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			/* For now the actionset is hardcoded */
			err = vcap_set_rule_set_actionset(vrule,
							  VCAP_AFS_BASE_TYPE);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			/* Links between VCAPs will be added later */
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}
	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");
out:
	vcap_free_rule(vrule);
	return err;
}

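/* Delete all VCAP rules that were created from this TC flower filter (looked
 * up by the filter cookie)
 */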
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	int err = -ENOENT, rule_id;

	vctrl = port->sparx5->vcap_ctrl;
	while (true) {
		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
		if (rule_id <= 0)
			break;
		err = vcap_del_rule(vctrl, ndev, rule_id);
		if (err) {
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
			break;
		}
	}
	return err;
}

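/* TC flower offload entry point: find the VCAP instance from the chain id and
 * dispatch on the flower command
 */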
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
		     bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	struct vcap_admin *admin;
	int err = -EINVAL;

	/* Get vcap instance from the chain id */
	vctrl = port->sparx5->vcap_ctrl;
	admin = vcap_find_admin(vctrl, fco->common.chain_index);
	if (!admin) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
		return err;
	}

	switch (fco->command) {
	case FLOW_CLS_REPLACE:
		return sparx5_tc_flower_replace(ndev, fco, admin);
	case FLOW_CLS_DESTROY:
		return sparx5_tc_flower_destroy(ndev, fco, admin);
	default:
		return -EOPNOTSUPP;
	}
}