// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tc_act/tc_gate.h>
#include <net/tcp.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "vcap_tc.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;
	u8 value;
	u8 mask;
	enum vcap_keyfield_set keyset;
};

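/* One entry per rule size (in subwords): when several port keysets of the
 * same size can match the filter, their keyset type ids are combined into a
 * shared value/mask so one (possibly wildcarded) TYPE key can cover them.
 */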
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

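/* Translate the flow dissector "basic" key (n_proto and ip_proto) into the
 * corresponding VCAP ethertype and IPv4/TCP/UDP classification keys.
 */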
static int
sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_IP_SNAP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "n_proto/ip_proto parse error");
	return err;
}

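/* Translate the flow dissector "control" key into the VCAP L3 fragment type
 * key: 0 = not fragmented, 1 = initial fragment, 3 = follow-up fragment.
 */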
static int
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
				value = 1; /* initial fragment */
				mask = 0x3;
			} else {
				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
					value = 3; /* follow up fragment */
					mask = 0x3;
				} else {
					value = 0; /* no fragment */
					mask = 0x3;
				}
			}
		} else {
			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
				value = 3; /* follow up fragment */
				mask = 0x3;
			} else {
				value = 0; /* no fragment */
				mask = 0x3;
			}
		}

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
	return err;
}

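/* Inner (customer) VLAN tag matching is only available in the IS0 VCAP */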
static int
sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");
		return -EINVAL;
	}

	return vcap_tc_flower_handler_cvlan_usage(st);
}

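/* IS0 matches the outer tag fields directly (VID0/PCP0); the other VCAPs
 * match on the classified VID and PCP.
 */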
static int
sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;

	if (st->admin->vtype == VCAP_TYPE_IS0) {
		vid_key = VCAP_KF_8021Q_VID0;
		pcp_key = VCAP_KF_8021Q_PCP0;
	}

	return vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
}

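/* Handlers for the supported flow dissector keys, indexed by key id */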
static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};

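/* Walk the dissector keys used by the filter, add the corresponding VCAP
 * keys to the rule, and reject the filter if it uses an unsupported key.
 */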
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct vcap_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}

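/* Sanity check the TC action list before offloading: reject duplicated
 * actions, require a trailing goto action except in the last lookup, and
 * refuse unsupported action combinations.
 */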
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco,
					 bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check whether the last action is a goto.
	 * Only the last chain/lookup may omit the goto action.
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
				       ingress)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
{
	int err;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		break;
	case VCAP_TYPE_ES0:
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
		break;
	case VCAP_TYPE_IS2:
	case VCAP_TYPE_ES2:
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
		break;
	default:
		pr_err("%s:%d: vcap type: %d not supported\n",
		       __func__, __LINE__, admin->vtype);
		break;
	}
	return 0;
}

/* Collect all port keysets and apply the first of them, possibly wildcarded */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two lists of keysets */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if more
	 * than one keyset was selected
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}

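/* A wildcard (ETH_P_ALL) filter may need one rule per port keyset: add an
 * extra copy of the rule using the requested keyset and link it to the
 * original rule through the TC cookie.
 */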
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	vcap_free_rule(vrule);
	return err;
}

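/* Add rule copies for the remaining keysets selected for a wildcard filter */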
static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_rule *erule,
					 struct vcap_admin *admin,
					 struct sparx5_multiple_rules *multi)
{
	int idx, err = 0;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		if (!multi->rule[idx].selected)
			continue;

		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
					      &multi->rule[idx]);
		if (err)
			break;
	}
	return err;
}

/* Add the actionset that is the default for the VCAP type */
static int sparx5_tc_set_actionset(struct vcap_admin *admin,
				   struct vcap_rule *vrule)
{
	enum vcap_actionfield_set aset;
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		aset = VCAP_AFS_CLASSIFICATION;
		break;
	case VCAP_TYPE_IS2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	case VCAP_TYPE_ES2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	default:
		return -EINVAL;
	}
	/* Do not overwrite any current actionset */
	if (vrule->actionset == VCAP_AFS_NO_VALUE)
		err = vcap_set_rule_set_actionset(vrule, aset);
	return err;
}

/* Add the VCAP key to match on for a rule target value */
static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
					  struct vcap_rule *vrule,
					  int target_cid)
{
	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
	int err;

	if (!link_val)
		return 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		/* Add NXT_IDX key for chaining rules between IS0 instances */
		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
					    1, /* enable */
					    ~0);
		if (err)
			return err;
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_IS2:
		/* Add PAG key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_ES2:
		/* Add ISDX key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
					     ~0);
	default:
		break;
	}
	return 0;
}

/* Add the VCAP action that adds a target value to a rule */
static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
				   struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   int from_cid, int to_cid)
{
	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
	int diff, err = 0;

	if (!to_admin) {
		pr_err("%s:%d: unsupported chain direction: %d\n",
		       __func__, __LINE__, to_cid);
		return -EINVAL;
	}

	diff = vcap_chain_offset(vctrl, from_cid, to_cid);
	if (!diff)
		return 0;

	if (admin->vtype == VCAP_TYPE_IS0 &&
	    to_admin->vtype == VCAP_TYPE_IS0) {
		/* Between IS0 instances the G_IDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
					       1); /* Replace */
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_IS2) {
		/* Between IS0 and IS2 the PAG value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PAG_OVERRIDE_MASK,
					       0xff);
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_ES2) {
		/* Between IS0 and ES2 the ISDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
					       diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_ISDX_ADD_REPLACE_SEL,
					       VCAP_BIT_1);
		if (err)
			goto out;
	} else {
		pr_err("%s:%d: unsupported chain destination: %d\n",
		       __func__, __LINE__, to_cid);
		err = -EOPNOTSUPP;
	}
out:
	return err;
}

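/* Validate the tc gate action and copy its parameters into the PSFP stream
 * gate configuration.
 */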
static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
					   struct flow_action_entry *act,
					   struct netlink_ext_ack *extack)
{
	int i;

	if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
		return -EINVAL;
	}

	if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
	    act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
		return -EINVAL;
	}

	if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
		return -EINVAL;
	}

	if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
		return -EINVAL;
	}

	sg->gate_state = true;
	sg->ipv = act->gate.prio;
	sg->num_entries = act->gate.num_entries;
	sg->cycletime = act->gate.cycletime;
	sg->cycletimeext = act->gate.cycletimeext;

	for (i = 0; i < sg->num_entries; i++) {
		sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
		sg->gce[i].interval = act->gate.entries[i].interval;
		sg->gce[i].ipv = act->gate.entries[i].ipv;
		sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
	}

	return 0;
}

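/* Convert the tc police action into a service policer configuration; the
 * rate is converted from bytes per second to kbit per second.
 */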
static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
					     struct flow_action_entry *act,
					     struct netlink_ext_ack *extack)
{
	pol->type = SPX5_POL_SERVICE;
	pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
	pol->burst = act->police.burst;
	pol->idx = act->hw_index;

	/* rate is now in kbit */
	if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
		return -EINVAL;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	return 0;
}

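/* Allocate the PSFP resources for a rule: a stream gate, an optional flow
 * meter and a stream filter, and make the rule select them through the ISDX
 * actions.
 */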
static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
				       struct vcap_rule *vrule, int sg_idx,
				       int pol_idx, struct sparx5_psfp_sg *sg,
				       struct sparx5_psfp_fm *fm,
				       struct sparx5_psfp_sf *sf)
{
	u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
	int ret;

	/* Must always have a stream gate - max sdu (filter option) is evaluated
	 * after frames have passed the gate, so in case of only a policer, we
	 * allocate a stream gate that is always open.
	 */
	if (sg_idx < 0) {
		sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
		sg->ipv = 0; /* Disabled */
		sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->num_entries = 1;
		sg->gate_state = 1; /* Open */
		sg->gate_enabled = 1;
		sg->gce[0].gate_state = 1;
		sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gce[0].ipv = 0;
		sg->gce[0].maxoctets = 0; /* Disabled */
	}

	ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
	if (ret < 0)
		return ret;

	if (pol_idx >= 0) {
		/* Add new flow-meter */
		ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
		if (ret < 0)
			return ret;
	}

	/* Map stream filter to stream gate */
	sf->sgid = psfp_sgid;

	/* Add a new stream-filter and map it to a stream gate */
	ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
	if (ret < 0)
		return ret;

	/* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
	sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);

	ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
				       VCAP_BIT_1);
	if (ret)
		return ret;

	ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
	if (ret)
		return ret;

	return 0;
}

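/* FLOW_CLS_REPLACE: build a VCAP rule from the flower match and actions,
 * set up PSFP when gate or police actions are present, and add extra rule
 * copies when the filter does not specify a protocol.
 */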
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    bool ingress)
{
	struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
	struct netlink_ext_ack *extack = fco->common.extack;
	int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_psfp_sg sg = { 0 };
	struct sparx5_psfp_fm fm = { 0 };
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	u16 l3_proto;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	vrule->cookie = fco->cookie;

	l3_proto = ETH_P_ALL;
	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_link_target(admin, vrule,
					     fco->common.chain_index);
	if (err)
		goto out;

	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_GATE: {
			err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
			if (err < 0)
				goto out;

			tc_sg_idx = act->hw_index;

			break;
		}
		case FLOW_ACTION_POLICE: {
			err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
								extack);
			if (err < 0)
				goto out;

			tc_pol_idx = fm.pol.idx;
			sf.max_sdu = act->police.mtu;

			break;
		}
		case FLOW_ACTION_TRAP:
			if (admin->vtype != VCAP_TYPE_IS2 &&
			    admin->vtype != VCAP_TYPE_ES2) {
				NL_SET_ERR_MSG_MOD(fco->common.extack,
						   "Trap action not supported in this VCAP");
				err = -EOPNOTSUPP;
				goto out;
			}
			err = vcap_rule_add_action_bit(vrule,
						       VCAP_AF_CPU_COPY_ENA,
						       VCAP_BIT_1);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_CPU_QUEUE_NUM, 0);
			if (err)
				goto out;
			if (admin->vtype != VCAP_TYPE_IS2)
				break;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_MASK_MODE,
						       SPX5_PMM_REPLACE_ALL);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			sparx5_tc_add_rule_link(vctrl, admin, vrule,
						fco->common.chain_index,
						act->chain_index);
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	/* Setup PSFP */
	if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
		err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
						  tc_pol_idx, &sg, &fm, &sf);
		if (err)
			goto out;
	}

	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
					       &multi);
	if (err) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "No matching port keyset for filter protocol and keys");
		goto out;
	}

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	if (l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
						    &multi);

out:
	vcap_free_rule(vrule);
	return err;
}

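/* Release the PSFP stream gate, flow meter and stream filter referenced by
 * the ISDX action of a rule.
 */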
static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
					  struct vcap_rule *vrule)
{
	struct vcap_client_actionfield *afield;
	u32 isdx, sfid, sgid, fmid;

	/* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
	 * it is used for stream and/or flow-meter classification.
	 */
	afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
	if (!afield)
		return;

	isdx = afield->data.u32.value;
	sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);

	if (!sfid)
		return;

	fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
	sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);

	if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
		pr_err("%s:%d Could not delete invalid fmid: %d", __func__,
		       __LINE__, fmid);

	if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
		pr_err("%s:%d Could not delete invalid sgid: %d", __func__,
		       __LINE__, sgid);

	if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
		pr_err("%s:%d Could not delete invalid sfid: %d", __func__,
		       __LINE__, sfid);

	sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
}

static int sparx5_tc_free_rule_resources(struct net_device *ndev,
					 struct vcap_control *vctrl,
					 int rule_id)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	struct vcap_rule *vrule;
	int ret = 0;

	vrule = vcap_get_rule(vctrl, rule_id);
	if (!vrule || IS_ERR(vrule))
		return -EINVAL;

	sparx5_tc_free_psfp_resources(sparx5, vrule);

	vcap_free_rule(vrule);
	return ret;
}

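/* FLOW_CLS_DESTROY: delete all VCAP rules created from the TC cookie and
 * release the PSFP resources attached to the first of them.
 */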
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	int err = -ENOENT, count = 0, rule_id;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;
	while (true) {
		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
		if (rule_id <= 0)
			break;
		if (count == 0) {
			/* Resources are attached to the first rule of
			 * a set of rules. Only works if the rules are
			 * in the correct order.
			 */
			err = sparx5_tc_free_rule_resources(ndev, vctrl,
							    rule_id);
			if (err)
				pr_err("%s:%d: could not free resources %d\n",
				       __func__, __LINE__, rule_id);
		}
		err = vcap_del_rule(vctrl, ndev, rule_id);
		if (err) {
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
			break;
		}
		++count; /* only free the attached resources once */
	}
	return err;
}

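/* FLOW_CLS_STATS: report the packet counter of the rule(s) matching the TC
 * cookie.
 */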
static int sparx5_tc_flower_stats(struct net_device *ndev,
				  struct flow_cls_offload *fco,
				  struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_counter ctr = {};
	struct vcap_control *vctrl;
	ulong lastused = 0;
	int err;

	vctrl = port->sparx5->vcap_ctrl;
	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
	if (err)
		return err;
	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	return err;
}

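/* Flower offload entry point: resolve the VCAP instance from the chain id
 * and dispatch on the TC command.
 */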
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
		     bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	struct vcap_admin *admin;
	int err = -EINVAL;

	/* Get vcap instance from the chain id */
	vctrl = port->sparx5->vcap_ctrl;
	admin = vcap_find_admin(vctrl, fco->common.chain_index);
	if (!admin) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
		return err;
	}

	switch (fco->command) {
	case FLOW_CLS_REPLACE:
		return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
	case FLOW_CLS_DESTROY:
		return sparx5_tc_flower_destroy(ndev, fco, admin);
	case FLOW_CLS_STATS:
		return sparx5_tc_flower_stats(ndev, fco, admin);
	default:
		return -EOPNOTSUPP;
	}
}