// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

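/* A block binding ties an ACL block to a single port in a single direction.
 * Binding one shared block to, say, two ports on ingress results in two
 * entries on block->binding_list (see mlxsw_sp_acl_block_bind() below).
 */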
struct mlxsw_sp_acl_block_binding {
	struct list_head list;
	struct net_device *dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	bool ingress;
};

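/* Rulesets live in acl->ruleset_ht, keyed by (block, chain_index, ops).
 * A given chain on a given shared block therefore resolves to exactly one
 * ruleset, no matter how many ports the block is bound to.
 */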
struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_acl_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
{
	return block->mlxsw_sp;
}

unsigned int
mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
{
	return block ? block->rule_count : 0;
}

void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count++;
}

void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count--;
}

bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
{
	return block->disable_count;
}

bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
{
	return block->egress_binding_count;
}

bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
{
	return block->ingress_binding_count;
}

bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
{
	return block->ingress_binding_count && block->egress_binding_count;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

static void
mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

static bool
mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
{
	return block->ruleset_zero;
}

static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

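	/* Unwind in reverse: the iterator still points at the binding that
	 * failed, so _continue_reverse unbinds only those already bound.
	 */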
rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

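/* Typical life cycle of a shared ACL block, as driven by the flow_block
 * offload callbacks (a sketch; error handling elided):
 *
 *	block = mlxsw_sp_acl_block_create(mlxsw_sp, net);
 *	err = mlxsw_sp_acl_block_bind(mlxsw_sp, block, mlxsw_sp_port,
 *				      ingress, extack);
 *	...
 *	mlxsw_sp_acl_block_unbind(mlxsw_sp, block, mlxsw_sp_port, ingress);
 *	mlxsw_sp_acl_block_destroy(block);
 */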
struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
						     struct net *net)
{
	struct mlxsw_sp_acl_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->binding_list);
	block->mlxsw_sp = mlxsw_sp;
	block->net = net;
	return block;
}

void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
{
	WARN_ON(!list_empty(&block->binding_list));
	kfree(block);
}

static struct mlxsw_sp_acl_block_binding *
mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
		    binding->ingress == ingress)
			return binding;
	return NULL;
}

int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    bool ingress,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
		return -EEXIST;

	if (ingress && block->ingress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	if (!ingress && block->egress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return -ENOMEM;
	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->ingress = ingress;

	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto err_ruleset_bind;
	}

	if (ingress)
		block->ingress_binding_count++;
	else
		block->egress_binding_count++;
	list_add(&binding->list, &block->binding_list);
	return 0;

err_ruleset_bind:
	kfree(binding);
	return err;
}

int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_block *block,
			      struct mlxsw_sp_port *mlxsw_sp_port,
			      bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
	if (!binding)
		return -ENOENT;

	list_del(&binding->list);

	if (ingress)
		block->ingress_binding_count--;
	else
		block->egress_binding_count--;

	if (mlxsw_sp_acl_ruleset_block_bound(block))
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

	kfree(binding);
	return 0;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_acl_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

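/* Drops a reference obtained from mlxsw_sp_acl_ruleset_get() above, which
 * is get-or-create: it either takes a reference on an existing ruleset or
 * creates a new one with a reference count of 1. Every successful get must
 * therefore be paired with exactly one put.
 */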
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

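/* Rule info blocks follow a fill-then-commit pattern: create the rulei,
 * add key/mask matches and actions, then commit the action block before
 * handing the rule to mlxsw_sp_acl_rule_add(). A minimal sketch with
 * illustrative values (error handling elided):
 *
 *	rulei = mlxsw_sp_acl_rulei_create(acl, NULL);
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_ETHERTYPE,
 *				       ETH_P_IP, 0xffff);
 *	err = mlxsw_sp_acl_rulei_act_drop(rulei, true, NULL, extack);
 *	err = mlxsw_sp_acl_rulei_commit(rulei);
 */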
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_acl_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_acl_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

enum mlxsw_sp_acl_mangle_field {
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
};

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
};
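
/* Worked example: an IPv4 DSCP rewrite arrives as a mangle of the first
 * 32-bit header word (offset 0) with mask 0xff03ffff. The zero bits of the
 * mask (bits 18..23) mark the DSCP field, so val >>= 18 in
 * mlxsw_sp_acl_rulei_act_mangle() below extracts the new DSCP value.
 */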

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	}

	/* We shouldn't have gotten a match in the first place! */
	WARN_ONCE(1, "Unhandled mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
								   rulei, mact,
								   val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

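/* A rule's life cycle as seen from an offload path (a sketch; in practice
 * the rulei is filled and committed between create and add):
 *
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie, NULL,
 *					extack);
 *	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *	...
 *	mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
 *	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 */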
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need ruleset with chain index 0, the implicit
		 * one, to be directly bound to device. The rest of the
		 * rulesets are bound by "Goto action set".
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

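/* Rule activity is polled, not event driven: a delayed work item
 * (mlxsw_sp_acl_rule_activity_update_work() below) walks acl->rules every
 * MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS and refreshes
 * rule->last_used for rules the hardware reports as recently active.
 */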
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

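/* The packet and byte counts returned here are deltas since the previous
 * call: the rule caches the last hardware readout in last_packets and
 * last_bytes, which matches how flow_stats_update() consumes stats.
 */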
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}