// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

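/* Top-level ACL state for a Spectrum instance: the flexible-key registry,
 * the dummy FID handed out via mlxsw_sp_acl_dummy_fid(), a hash table of
 * rulesets keyed by (block, chain, profile ops), a mutex-protected list of
 * all rules that is walked by the periodic activity poll, and the TCAM
 * state itself.
 */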
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_block_binding {
	struct list_head list;
	struct net_device *dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	bool ingress;
};

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_acl_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
{
	return block->mlxsw_sp;
}

unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
{
	return block ? block->rule_count : 0;
}

void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count++;
}

void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count--;
}

bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
{
	return block->disable_count;
}

bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list) {
		if (!binding->ingress)
			return true;
	}
	return false;
}

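/* Called with one reference held by the rule being added or removed; a
 * count of exactly two therefore means no one else is using the ruleset.
 */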
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

static void
mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
{
	return block->ruleset_zero;
}

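/* Bind the chain-0 ruleset to every port binding of the block. If one of
 * the binds fails, undo the previously successful ones in reverse order.
 */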
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
						     struct net *net)
{
	struct mlxsw_sp_acl_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->binding_list);
	block->mlxsw_sp = mlxsw_sp;
	block->net = net;
	return block;
}

void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
{
	WARN_ON(!list_empty(&block->binding_list));
	kfree(block);
}

static struct mlxsw_sp_acl_block_binding *
mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
		    binding->ingress == ingress)
			return binding;
	return NULL;
}

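/* Bind a port to a block. If the block already has its chain-0 ruleset
 * bound, bind it to the new port as well. Binding to egress is refused
 * while the block holds rules that are only supported on ingress.
 */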
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    bool ingress,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
		return -EEXIST;

	if (!ingress && block->egress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return -ENOMEM;
	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->ingress = ingress;

	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto err_ruleset_bind;
	}

	list_add(&binding->list, &block->binding_list);
	return 0;

err_ruleset_bind:
	kfree(binding);
	return err;
}

int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_block *block,
			      struct mlxsw_sp_port *mlxsw_sp_port,
			      bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
	if (!binding)
		return -ENOENT;

	list_del(&binding->list);

	if (mlxsw_sp_acl_ruleset_block_bound(block))
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

	kfree(binding);
	return 0;
}

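/* Create a ruleset: set up the per-ruleset rule hash table, let the
 * profile ops allocate the underlying TCAM resources, then publish the
 * ruleset in the ruleset hash table. Errors unwind in reverse order.
 */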
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_acl_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

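/* Get a reference on the ruleset for the given block, chain and profile,
 * creating the ruleset if it does not exist yet. Each successful call
 * must be paired with mlxsw_sp_acl_ruleset_put().
 */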
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

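/* Create rule info. When the caller supplies an afa_block, the rule info
 * merely borrows it; otherwise a new action block is created here and
 * owned by the rule info, as recorded in rulei->action_created.
 */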
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}


void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_acl_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_acl_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      &rulei->counter_index, extack);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

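/* Create a rule within a ruleset. The rule takes its own reference on the
 * ruleset, which is released again in mlxsw_sp_acl_rule_destroy().
 */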
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

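/* Offload a rule to the device: program it via the profile ops, index it
 * by cookie, and, for the first rule of the implicit chain 0, bind the
 * ruleset directly to the block's ports. The rule is also added to the
 * global list that the activity work polls.
 */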
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need ruleset with chain index 0, the implicit
		 * one, to be directly bound to device. The rest of the
		 * rulesets are bound by "Goto action set".
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

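/* Query the hardware activity bit of a single rule and refresh the rule's
 * last_used timestamp if the rule matched since the previous poll.
 */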
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

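/* Report counter deltas accumulated since the previous call, together
 * with the last activity timestamp, and reset the baseline to the
 * current hardware readings.
 */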
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

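/* Initialize the ACL subsystem: flexible keys, the ruleset hash table,
 * the dummy FID, the TCAM and the periodic rule-activity work.
 * mlxsw_sp_acl_fini() tears all of this down in reverse order.
 */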
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}