// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */

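/* Offload of tc flower classifier rules into the prestera ACL engine.
 * A typical rule handled here looks like (the port name is only an example):
 *
 *   tc filter add dev sw1p1 ingress protocol ip flower \
 *       src_ip 10.0.0.1/32 action drop
 */
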
#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"

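/* Each chain template created through prestera_flower_tmplt_create() is
 * tracked here; it pins the chain's ruleset so that the keymask derived
 * from the template stays attached to it until the template is destroyed.
 */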
struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;
	struct list_head list;
	u32 chain_index;
};

static void
prestera_flower_template_free(struct prestera_flower_template *template)
{
	prestera_acl_ruleset_put(template->ruleset);
	list_del(&template->list);
	kfree(template);
}

void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
	struct prestera_flower_template *template, *tmp;

	/* put the reference to all rulesets kept in tmpl create */
	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		prestera_flower_template_free(template);
}

static int
prestera_flower_parse_goto_action(struct prestera_flow_block *block,
				  struct prestera_acl_rule *rule,
				  u32 chain_index,
				  const struct flow_action_entry *act)
{
	struct prestera_acl_ruleset *ruleset;

	if (act->chain_index <= chain_index)
		/* we can jump only forward */
		return -EINVAL;

	if (rule->re_arg.jump.valid)
		return -EEXIST;

	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   act->chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule->re_arg.jump.valid = 1;
	rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);

	rule->jump_ruleset = ruleset;

	return 0;
}

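/* Translate the flower action list into rule->re_arg. A HW counter is set
 * up only when delayed HW stats are allowed; each supported action (accept,
 * drop, trap, goto) may be present at most once.
 */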
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 u32 chain_index,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	/* whole struct (rule->re_arg) must be initialized with 0 */
	if (!flow_action_has_entries(flow_action))
		return 0;

	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
		/* setup counter first */
		rule->re_arg.count.valid = true;
		err = prestera_acl_chain_to_client(chain_index,
						   &rule->re_arg.count.client);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		case FLOW_ACTION_GOTO:
			err = prestera_flower_parse_goto_action(block, rule,
								chain_index,
								act);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

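/* Match on the ingress port: resolve the META key's ifindex to a prestera
 * port and encode it into the SYS_PORT/SYS_DEV match fields.
 */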
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
				      struct flow_cls_offload *f,
				      struct prestera_flow_block *block)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct prestera_acl_match *r_match = &rule->re_key.match;
	struct prestera_port *port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;
	__be16 key, mask;

	flow_rule_match_meta(f_rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!prestera_netdev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't match on switchdev ingress port");
		return -EINVAL;
	}
	port = netdev_priv(ingress_dev);

	mask = htons(0x1FFF);
	key = htons(port->hw_id);
	rule_match_set(r_match->key, SYS_PORT, key);
	rule_match_set(r_match->mask, SYS_PORT, mask);

	mask = htons(0x1FF);
	key = htons(port->dev_id);
	rule_match_set(r_match->key, SYS_DEV, key);
	rule_match_set(r_match->mask, SYS_DEV, mask);

	return 0;
}

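/* Translate a flower classifier into an ACL rule: validate the dissector
 * keys, fill the hardware match key/mask and finally parse the actions.
 */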
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_match *r_match = &rule->re_key.match;
	__be16 n_proto_mask = 0;
	__be16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	prestera_acl_rule_priority_set(rule, f->common.prio);

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = match.key->n_proto;
		n_proto_mask = match.mask->n_proto;

		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);

		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* DA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_DMAC_0, &match.key->dst[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_DMAC_1, &match.key->dst[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_DMAC_0, &match.mask->dst[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_DMAC_1, &match.mask->dst[4], 2);

		/* SA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_SMAC_0, &match.key->src[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_SMAC_1, &match.key->src[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_SMAC_0, &match.mask->src[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_SMAC_1, &match.mask->src[4], 2);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		rule_match_set(r_match->key, IP_SRC, match.key->src);
		rule_match_set(r_match->mask, IP_SRC, match.mask->src);

		rule_match_set(r_match->key, IP_DST, match.key->dst);
		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD(f->common.extack,
					   "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);

		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		if (match.mask->vlan_id != 0) {
			__be16 key = cpu_to_be16(match.key->vlan_id);
			__be16 mask = cpu_to_be16(match.mask->vlan_id);

			rule_match_set(r_match->key, VLAN_ID, key);
			rule_match_set(r_match->mask, VLAN_ID, mask);
		}

		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);

		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
	}

	return prestera_flower_parse_actions(block, rule, &f->rule->action,
					     f->common.chain_index,
					     f->common.extack);
}

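/* Install a flower rule: get the ruleset of the chain, create and parse the
 * rule, make sure the ruleset is offloaded and add the rule to hardware.
 * The reference taken by prestera_acl_ruleset_get() is dropped here again;
 * the rule keeps its own reference taken in prestera_acl_rule_create().
 */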
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie,
					f->common.chain_index);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}

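/* Remove an offloaded flower rule: look up the ruleset and the rule by the
 * flower cookie, delete it from hardware and drop the lookup reference.
 */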
void prestera_flower_destroy(struct prestera_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return;

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (rule) {
		prestera_acl_rule_del(block->sw, rule);
		prestera_acl_rule_destroy(rule);
	}
	prestera_acl_ruleset_put(ruleset);
}

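/* Handle a chain template: parse it into a temporary rule to derive the
 * keymask, bind that keymask to the chain's ruleset and remember the
 * template (with its ruleset reference) on the block's template list.
 */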
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   f->common.chain_index);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* preserve keymask/template to this ruleset */
	prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);

	/* Skip the error: a template operation cannot be rejected, so keep
	 * the reference to the ruleset for rules to be added to it later.
	 * If offload fails here, the ruleset will be offloaded again when
	 * a new rule is added. It is also unlikely that the ruleset is
	 * already offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	template->chain_index = f->common.chain_index;
	list_add_rcu(&template->list, &block->template_list);
	return 0;

err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}

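/* Drop the template created for this chain together with the ruleset
 * reference kept by it.
 */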
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct prestera_flower_template *template, *tmp;

	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		if (template->chain_index == f->common.chain_index) {
			/* put the reference to the ruleset kept in create */
			prestera_flower_template_free(template);
			return;
		}
}

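/* Report HW counters of a rule: look it up by the flower cookie and return
 * packet/byte/lastuse values to the flower core as delayed HW stats.
 */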
int prestera_flower_stats(struct prestera_flow_block *block,
			  struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_get_stats;
	}

	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
					  &bytes, &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

err_rule_get_stats:
	prestera_acl_ruleset_put(ruleset);
	return err;
}