/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_block_binding {
	struct list_head list;
	struct net_device *dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	bool ingress;
};

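/* A block aggregates the port/direction bindings that share the same set of
 * rulesets. Only the chain-0 ruleset ("ruleset_zero") is bound directly to
 * the member ports; rules in other chains are reached from chain 0 via goto
 * (jump) actions.
 */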
struct mlxsw_sp_acl_block {
	struct list_head binding_list;
	struct mlxsw_sp_acl_ruleset *ruleset_zero;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int rule_count;
	unsigned int disable_count;
};

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_acl_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
{
	return block->mlxsw_sp;
}

unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
{
	return block ? block->rule_count : 0;
}

void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count++;
}

void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count--;
}

bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
{
	return block->disable_count;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

static void
mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
{
	return block->ruleset_zero;
}

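/* Bind the block's chain-0 ruleset to every port/direction binding already
 * attached to the block. On failure, unbind the bindings done so far and
 * clear ruleset_zero again.
 */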
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
						     struct net *net)
{
	struct mlxsw_sp_acl_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->binding_list);
	block->mlxsw_sp = mlxsw_sp;
	return block;
}

void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
{
	WARN_ON(!list_empty(&block->binding_list));
	kfree(block);
}

static struct mlxsw_sp_acl_block_binding *
mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
		    binding->ingress == ingress)
			return binding;
	return NULL;
}

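/* Attach a port/direction binding to the block. If the block already has a
 * chain-0 ruleset bound, bind it to the new port as well so the port starts
 * matching the existing rules right away.
 */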
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
		return -EEXIST;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return -ENOMEM;
	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->ingress = ingress;

	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto err_ruleset_bind;
	}

	list_add(&binding->list, &block->binding_list);
	return 0;

err_ruleset_bind:
	kfree(binding);
	return err;
}

int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_block *block,
			      struct mlxsw_sp_port *mlxsw_sp_port,
			      bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
	if (!binding)
		return -ENOENT;

	list_del(&binding->list);

	if (mlxsw_sp_acl_ruleset_block_bound(block))
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

	kfree(binding);
	return 0;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_acl_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

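/* Get a referenced ruleset for the given block/chain/profile, creating it
 * if it does not exist yet. The reference is dropped with
 * mlxsw_sp_acl_ruleset_put().
 */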
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

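/* Rule info holds the flexible key values and the flexible action block that
 * the mlxsw_sp_acl_rulei_*() helpers below fill in before the rule is
 * committed to hardware.
 */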
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);
	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

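/* tc passes the priority with the user-visible preference in the upper
 * 16 bits; keep only that part as the rule priority.
 */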
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority >> 16;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_acl_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_acl_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      &rulei->counter_index, extack);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

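/* Offload the rule to hardware and make it visible for lookup by cookie.
 * When the chain-0 ruleset gets its first rule, bind it to the block's
 * ports; rulesets of other chains are reached via jump actions only and
 * need no direct binding.
 */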
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need ruleset with chain index 0, the implicit
		 * one, to be directly bound to device. The rest of the
		 * rulesets are bound by "Goto action set".
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
						      ruleset->ht_key.block);
		if (err)
			goto err_ruleset_block_bind;
	}

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	ruleset->ht_key.block->rule_count++;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ruleset->ht_key.block->rule_count--;
	list_del(&rule->list);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
						  ruleset->ht_key.block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

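/* Report packet/byte counts as deltas since the previous call and remember
 * the current hardware counter values, so repeated stats dumps only see new
 * traffic. last_use reflects the latest activity observed by the periodic
 * activity update work.
 */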
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for periodic rule activity updates */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}