1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
3 
4 #include "prestera.h"
5 #include "prestera_acl.h"
6 #include "prestera_flow.h"
7 #include "prestera_flower.h"
8 
/* One tc chain template created via FLOW_CLS_TMPLT_CREATE; holds a
 * ruleset reference so the keymask bound to it stays alive until the
 * template is destroyed.
 */
struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;	/* ref taken in tmplt_create */
	struct list_head list;			/* node in block->template_list */
	u32 chain_index;			/* tc chain this template is for */
};
14 
/* Release one template: drop the ruleset reference taken at template
 * creation, unlink the entry from its block's template list and free it.
 */
static void
prestera_flower_template_free(struct prestera_flower_template *template)
{
	prestera_acl_ruleset_put(template->ruleset);
	list_del(&template->list);
	kfree(template);
}
22 
23 void prestera_flower_template_cleanup(struct prestera_flow_block *block)
24 {
25 	struct prestera_flower_template *template, *tmp;
26 
27 	/* put the reference to all rulesets kept in tmpl create */
28 	list_for_each_entry_safe(template, tmp, &block->template_list, list)
29 		prestera_flower_template_free(template);
30 }
31 
/* Translate FLOW_ACTION_GOTO into a HW jump action.
 *
 * Takes a reference on the target chain's ruleset (stored in
 * rule->jump_ruleset) and records the ruleset's HW index in the rule
 * engine arguments.
 *
 * Returns -EINVAL for a jump that is not strictly forward, -EEXIST if a
 * jump action was already parsed for this rule, or the error from
 * prestera_acl_ruleset_get().
 */
static int
prestera_flower_parse_goto_action(struct prestera_flow_block *block,
				  struct prestera_acl_rule *rule,
				  u32 chain_index,
				  const struct flow_action_entry *act)
{
	struct prestera_acl_ruleset *ruleset;

	if (act->chain_index <= chain_index)
		/* we can jump only forward */
		return -EINVAL;

	if (rule->re_arg.jump.valid)
		return -EEXIST;

	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   act->chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule->re_arg.jump.valid = 1;
	rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);

	rule->jump_ruleset = ruleset;

	return 0;
}
59 
/* Translate the flower action list into rule engine args (rule->re_arg).
 *
 * Supported actions: accept, drop, trap, goto. Each may appear at most
 * once per rule (-EEXIST otherwise); any other action yields -EOPNOTSUPP.
 * The caller must hand in @rule with re_arg zero-initialized.
 */
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 u32 chain_index,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	/* whole struct (rule->re_arg) must be initialized with 0 */
	if (!flow_action_has_entries(flow_action))
		return 0;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		case FLOW_ACTION_GOTO:
			err = prestera_flower_parse_goto_action(block, rule,
								chain_index,
								act);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
109 
110 static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
111 				      struct flow_cls_offload *f,
112 				      struct prestera_flow_block *block)
113 {	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
114 	struct prestera_acl_match *r_match = &rule->re_key.match;
115 	struct prestera_port *port;
116 	struct net_device *ingress_dev;
117 	struct flow_match_meta match;
118 	__be16 key, mask;
119 
120 	flow_rule_match_meta(f_rule, &match);
121 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
122 		NL_SET_ERR_MSG_MOD(f->common.extack,
123 				   "Unsupported ingress ifindex mask");
124 		return -EINVAL;
125 	}
126 
127 	ingress_dev = __dev_get_by_index(block->net,
128 					 match.key->ingress_ifindex);
129 	if (!ingress_dev) {
130 		NL_SET_ERR_MSG_MOD(f->common.extack,
131 				   "Can't find specified ingress port to match on");
132 		return -EINVAL;
133 	}
134 
135 	if (!prestera_netdev_check(ingress_dev)) {
136 		NL_SET_ERR_MSG_MOD(f->common.extack,
137 				   "Can't match on switchdev ingress port");
138 		return -EINVAL;
139 	}
140 	port = netdev_priv(ingress_dev);
141 
142 	mask = htons(0x1FFF);
143 	key = htons(port->hw_id);
144 	rule_match_set(r_match->key, SYS_PORT, key);
145 	rule_match_set(r_match->mask, SYS_PORT, mask);
146 
147 	mask = htons(0x1FF);
148 	key = htons(port->dev_id);
149 	rule_match_set(r_match->key, SYS_DEV, key);
150 	rule_match_set(r_match->mask, SYS_DEV, mask);
151 
152 	return 0;
153 
154 }
155 
156 static int prestera_flower_parse(struct prestera_flow_block *block,
157 				 struct prestera_acl_rule *rule,
158 				 struct flow_cls_offload *f)
159 {	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
160 	struct flow_dissector *dissector = f_rule->match.dissector;
161 	struct prestera_acl_match *r_match = &rule->re_key.match;
162 	__be16 n_proto_mask = 0;
163 	__be16 n_proto_key = 0;
164 	u16 addr_type = 0;
165 	u8 ip_proto = 0;
166 	int err;
167 
168 	if (dissector->used_keys &
169 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
170 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
171 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
172 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
173 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
174 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
175 	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
176 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
177 	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
178 		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
179 		return -EOPNOTSUPP;
180 	}
181 
182 	prestera_acl_rule_priority_set(rule, f->common.prio);
183 
184 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
185 		err = prestera_flower_parse_meta(rule, f, block);
186 		if (err)
187 			return err;
188 	}
189 
190 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
191 		struct flow_match_control match;
192 
193 		flow_rule_match_control(f_rule, &match);
194 		addr_type = match.key->addr_type;
195 	}
196 
197 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
198 		struct flow_match_basic match;
199 
200 		flow_rule_match_basic(f_rule, &match);
201 		n_proto_key = match.key->n_proto;
202 		n_proto_mask = match.mask->n_proto;
203 
204 		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
205 			n_proto_key = 0;
206 			n_proto_mask = 0;
207 		}
208 
209 		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
210 		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
211 
212 		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
213 		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
214 		ip_proto = match.key->ip_proto;
215 	}
216 
217 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
218 		struct flow_match_eth_addrs match;
219 
220 		flow_rule_match_eth_addrs(f_rule, &match);
221 
222 		/* DA key, mask */
223 		rule_match_set_n(r_match->key,
224 				 ETH_DMAC_0, &match.key->dst[0], 4);
225 		rule_match_set_n(r_match->key,
226 				 ETH_DMAC_1, &match.key->dst[4], 2);
227 
228 		rule_match_set_n(r_match->mask,
229 				 ETH_DMAC_0, &match.mask->dst[0], 4);
230 		rule_match_set_n(r_match->mask,
231 				 ETH_DMAC_1, &match.mask->dst[4], 2);
232 
233 		/* SA key, mask */
234 		rule_match_set_n(r_match->key,
235 				 ETH_SMAC_0, &match.key->src[0], 4);
236 		rule_match_set_n(r_match->key,
237 				 ETH_SMAC_1, &match.key->src[4], 2);
238 
239 		rule_match_set_n(r_match->mask,
240 				 ETH_SMAC_0, &match.mask->src[0], 4);
241 		rule_match_set_n(r_match->mask,
242 				 ETH_SMAC_1, &match.mask->src[4], 2);
243 	}
244 
245 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
246 		struct flow_match_ipv4_addrs match;
247 
248 		flow_rule_match_ipv4_addrs(f_rule, &match);
249 
250 		rule_match_set(r_match->key, IP_SRC, match.key->src);
251 		rule_match_set(r_match->mask, IP_SRC, match.mask->src);
252 
253 		rule_match_set(r_match->key, IP_DST, match.key->dst);
254 		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
255 	}
256 
257 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
258 		struct flow_match_ports match;
259 
260 		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
261 			NL_SET_ERR_MSG_MOD
262 			    (f->common.extack,
263 			     "Only UDP and TCP keys are supported");
264 			return -EINVAL;
265 		}
266 
267 		flow_rule_match_ports(f_rule, &match);
268 
269 		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
270 		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
271 
272 		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
273 		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
274 	}
275 
276 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
277 		struct flow_match_vlan match;
278 
279 		flow_rule_match_vlan(f_rule, &match);
280 
281 		if (match.mask->vlan_id != 0) {
282 			__be16 key = cpu_to_be16(match.key->vlan_id);
283 			__be16 mask = cpu_to_be16(match.mask->vlan_id);
284 
285 			rule_match_set(r_match->key, VLAN_ID, key);
286 			rule_match_set(r_match->mask, VLAN_ID, mask);
287 		}
288 
289 		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
290 		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
291 	}
292 
293 	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
294 		struct flow_match_icmp match;
295 
296 		flow_rule_match_icmp(f_rule, &match);
297 
298 		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
299 		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
300 
301 		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
302 		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
303 	}
304 
305 	return prestera_flower_parse_actions(block, rule, &f->rule->action,
306 					     f->common.chain_index,
307 					     f->common.extack);
308 }
309 
/* Offload a flower rule (FLOW_CLS_REPLACE).
 *
 * Gets (or creates) the ruleset for the rule's chain, allocates an ACL
 * rule bound to f->cookie, parses match keys and actions into it,
 * ensures the ruleset is offloaded to HW, then adds the rule.
 *
 * Returns 0 on success or a negative errno.
 */
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie,
					f->common.chain_index);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	/* lazily offload the ruleset on first rule insertion */
	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	/* drop the local get; the rule keeps its own reference */
	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	/* presumably also drops the ref taken by rule_create — see
	 * prestera_acl_rule_destroy()
	 */
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}
354 
355 void prestera_flower_destroy(struct prestera_flow_block *block,
356 			     struct flow_cls_offload *f)
357 {
358 	struct prestera_acl_ruleset *ruleset;
359 	struct prestera_acl_rule *rule;
360 
361 	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
362 					      f->common.chain_index);
363 	if (IS_ERR(ruleset))
364 		return;
365 
366 	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
367 	if (rule) {
368 		prestera_acl_rule_del(block->sw, rule);
369 		prestera_acl_rule_destroy(rule);
370 	}
371 	prestera_acl_ruleset_put(ruleset);
372 
373 }
374 
/* Handle FLOW_CLS_TMPLT_CREATE: derive the HW keymask from the template
 * match and bind it to the chain's ruleset, so that all rules later
 * added to this chain share one keymask.
 */
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	/* parse the template into a throwaway on-stack rule just to obtain
	 * the resulting match mask (the keymask)
	 */
	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   f->common.chain_index);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* preserve keymask/template to this ruleset */
	prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);

	/* skip error, as it is not possible to reject template operation,
	 * so, keep the reference to the ruleset for rules to be added
	 * to that ruleset later. In case of offload fail, the ruleset
	 * will be offloaded again during adding a new rule. Also, it is
	 * unlikely that the ruleset is already offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	template->chain_index = f->common.chain_index;
	/* NOTE(review): published with list_add_rcu() but the list is only
	 * walked with plain list_for_each_entry_safe() in this file —
	 * confirm whether RCU publication is actually required here.
	 */
	list_add_rcu(&template->list, &block->template_list);
	return 0;

err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}
425 
426 void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
427 				   struct flow_cls_offload *f)
428 {
429 	struct prestera_flower_template *template, *tmp;
430 
431 	list_for_each_entry_safe(template, tmp, &block->template_list, list)
432 		if (template->chain_index == f->common.chain_index) {
433 			/* put the reference to the ruleset kept in create */
434 			prestera_flower_template_free(template);
435 			return;
436 		}
437 }
438 
/* Report HW counters for a flower rule (FLOW_CLS_STATS).
 *
 * Looks up the rule by cookie, fetches packet/byte/lastuse counters from
 * the ACL engine and pushes them into f->stats.
 *
 * Returns 0 on success, -EINVAL if the rule is unknown, or an error from
 * the ruleset lookup / stats query.
 */
int prestera_flower_stats(struct prestera_flow_block *block,
			  struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_get_stats;
	}

	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
					  &bytes, &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

err_rule_get_stats:
	/* always drop the lookup reference, on success and on error */
	prestera_acl_ruleset_put(ruleset);
	return err;
}
472