/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"

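/* Per-ASIC ACL state. Holds the flexible key (afk) and flexible action (afa)
 * handles, the hash table of rulesets keyed by binding point, the list of
 * installed rules walked by the activity update work, and the profile
 * implementation's private data in priv[].
 */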
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_afa *afa;
	struct mlxsw_sp_fid *dummy_fid;
	const struct mlxsw_sp_acl_ops *ops;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

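/* Rulesets are looked up by the netdev they are bound to, the direction
 * (ingress/egress), the tc chain index and the profile ops that created them.
 */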
struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

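/* Allocate a ruleset together with the profile's private area, take the
 * initial reference and let the profile ops add it to the hardware.
 */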
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

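/* Bind a ruleset to a netdev/direction/chain and make it discoverable in the
 * ruleset hash table. Only the chain 0 ruleset is bound to the device itself.
 */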
static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress,
				     u32 chain_index)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	ruleset->ht_key.chain_index = chain_index;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;
	if (!ruleset->ht_key.chain_index) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be directly bound to the device. The remaining
		 * rulesets are reached through the "goto" action.
		 */
		err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
		if (err)
			goto err_ops_ruleset_bind;
	}
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}

static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	if (!ruleset->ht_key.chain_index)
		ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
			      bool ingress, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
			    bool ingress, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
						chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

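/* Get-or-create: return an existing ruleset for this binding point with its
 * reference count bumped, or create and bind a fresh one.
 */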
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
			 bool ingress, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
						chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
					ingress, chain_index);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}

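/* Rule info carries the match values and the action block being built up
 * before the rule is committed to hardware. Note that allocation failure is
 * reported as NULL while action block creation failure is an ERR_PTR.
 */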
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return NULL;
	rulei->act_block = mlxsw_afa_block_create(acl->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}

void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to forward the packet
		 * back out of the ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype);
	} else {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}

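/* A rule pins its ruleset for as long as it exists. Allocate the profile's
 * per-rule private area, the rule info and a flow counter up front.
 */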
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

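/* Program the rule into hardware via the profile ops, then track it by cookie
 * in the per-ruleset hash table and on the global list walked by the
 * activity update work.
 */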
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

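/* Poll hardware for rule activity; a rule reported active gets its last_used
 * timestamp refreshed, which is later exposed through the stats as last_use.
 */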
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

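/* Report packet and byte counts as deltas since the previous call, together
 * with the last_used timestamp maintained by the activity update work.
 */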
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1

static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
				     char *enc_actions, bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	u32 kvdl_index;
	int err;

	/* The first action set of a TCAM entry is stored directly in TCAM,
	 * not in the KVD linear area.
	 */
	if (is_first)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
				  &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		goto err_pefa_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_pefa_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

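/* Forwarding actions reference their destination through a single KVD linear
 * entry that is programmed with the local port via the PPBS register.
 */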
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
					   u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char ppbs_pl[MLXSW_REG_PPBS_LEN];
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
	if (err)
		goto err_ppbs_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_ppbs_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
};

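/* Bring up the ACL infrastructure: flexible keys, flexible actions, the
 * ruleset hash table, the dummy FID, the TCAM ops and finally the periodic
 * rule activity update work.
 */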
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for periodic rule activity updates */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afa_destroy(acl->afa);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}