xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c (revision 4f139972b489f8bc2c821aa25ac65018d92af3f7)
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"

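/* Per-device ACL context. One instance hangs off each mlxsw_sp and owns
 * the flex key/action resources, the ruleset hashtable, the global list
 * of offloaded rules and the periodic rule activity update work.
 */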
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_afa *afa;
	const struct mlxsw_sp_acl_ops *ops;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

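/* Two levels of hashing are used: rulesets are keyed by the
 * {dev, ingress, ops} triplet they are bound with, while the rules
 * within a ruleset are keyed by the cookie they were created with.
 */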
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;
	err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
	if (err)
		goto err_ops_ruleset_bind;
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}

static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}

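/* Rulesets are reference counted: the creator holds the initial
 * reference and every rule added to the ruleset takes another one.
 * Dropping the last reference unbinds the ruleset from its device
 * and destroys it.
 */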
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

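/* Look up the ruleset bound to the given device and direction for this
 * profile and take a reference to it, or create and bind a new one if
 * none exists yet.
 */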
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.ops = ops;
	ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
					 mlxsw_sp_acl_ruleset_ht_params);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM); /* callers check IS_ERR(), not NULL */
	rulei->act_block = mlxsw_afa_block_create(acl->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

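/* Finalize the rule's action block once all matches and actions have
 * been appended, before the rule is installed in hardware.
 */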
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}

void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}

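/* Only the VLAN modify action is offloadable. The device encodes the
 * TPID as an index: 0 for 802.1Q, 1 for 802.1AD.
 */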
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype);
	} else {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

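/* Install the rule in hardware via the profile ops, then make it
 * visible for cookie lookups and for the activity update work.
 */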
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

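/* Query the rule's hardware activity bit and, if it was set, record
 * the current time in last_used.
 */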
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

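/* Return the packet/byte deltas accumulated since the previous call,
 * along with the last_used timestamp, and remember the current
 * hardware counter values for the next call.
 */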
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

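/* Action sets which do not fit into the TCAM entry itself are spilled
 * into KVD linear memory. The callbacks below manage these
 * KVDL-resident action sets and forwarding entries on behalf of the
 * flex action core.
 */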
#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1

static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
				     char *enc_actions, bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	u32 kvdl_index;
	int err;

	/* The first action set of a TCAM entry is stored directly in TCAM,
	 * not KVD linear area.
	 */
	if (is_first)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
				  &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		goto err_pefa_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_pefa_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
					   u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char ppbs_pl[MLXSW_REG_PPBS_LEN];
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
	if (err)
		goto err_ppbs_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_ppbs_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
};

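/* Bring up the ACL infrastructure: flex keys, flex actions, the
 * ruleset hashtable and the profile-specific (TCAM) part, then kick
 * off the periodic rule activity update work.
 */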
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for rule activity updates */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afa_destroy(acl->afa);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}