/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"

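/* Per-ASIC ACL state. Holds the flexible key registry (afk), the hash
 * table of rulesets keyed by (block, chain index, profile ops), the
 * global list of rules walked by the activity-polling work, and trailing
 * private storage for the backend implementation behind the ops.
 */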
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	const struct mlxsw_sp_acl_ops *ops;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_block_binding {
	struct list_head list;
	struct net_device *dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	bool ingress;
};

struct mlxsw_sp_acl_block {
	struct list_head binding_list;
	struct mlxsw_sp_acl_ruleset *ruleset_zero;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int rule_count;
	unsigned int disable_count;
};

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_acl_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
{
	return block->mlxsw_sp;
}

unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
{
	return block ? block->rule_count : 0;
}

void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count++;
}

void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
{
	if (block)
		block->disable_count--;
}

bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
{
	return block->disable_count;
}

static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

static void
mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_acl_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
{
	return block->ruleset_zero;
}

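/* Bind the chain 0 ruleset to every port binding of the block, unwinding
 * the already-bound ports if any bind fails so the block is left unchanged.
 */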
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_acl_block *block)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
						     struct net *net)
{
	struct mlxsw_sp_acl_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->binding_list);
	block->mlxsw_sp = mlxsw_sp;
	return block;
}

void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
{
	WARN_ON(!list_empty(&block->binding_list));
	kfree(block);
}

static struct mlxsw_sp_acl_block_binding *
mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
			  struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
		    binding->ingress == ingress)
			return binding;
	return NULL;
}

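/* Add a (port, direction) binding to the block. If a chain 0 ruleset
 * already exists for the block, bind it to the new port right away so
 * the port starts matching the block's rules.
 */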
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;
	int err;

	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
		return -EEXIST;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return -ENOMEM;
	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->ingress = ingress;

	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto err_ruleset_bind;
	}

	list_add(&binding->list, &block->binding_list);
	return 0;

err_ruleset_bind:
	kfree(binding);
	return err;
}

int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_block *block,
			      struct mlxsw_sp_port *mlxsw_sp_port,
			      bool ingress)
{
	struct mlxsw_sp_acl_block_binding *binding;

	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
	if (!binding)
		return -ENOENT;

	list_del(&binding->list);

	if (mlxsw_sp_acl_ruleset_block_bound(block))
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

	kfree(binding);
	return 0;
}

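/* Allocate a ruleset, back it in hardware through the profile ops and
 * insert it into the ruleset hash table. A chain 0 ruleset is also bound
 * to all ports currently attached to the block.
 */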
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	if (!chain_index) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be bound directly to the device. The remaining
		 * rulesets are reached through "goto" actions.
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_bind;
	}

	return ruleset;

err_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
	u32 chain_index = ruleset->ht_key.chain_index;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	if (!chain_index)
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_acl_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

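/* Look up the ruleset for (block, chain) and take a reference, creating
 * it on first use. Callers pair this with mlxsw_sp_acl_ruleset_put().
 */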
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

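/* Allocate a rule info structure together with an empty flexible action
 * block; match key/mask values and actions are appended to it before the
 * rule is committed to hardware.
 */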
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return NULL;
	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller requests forwarding
		 * back to the ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}

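/* Mirroring needs the ingress port, so it is only supported when the
 * block has exactly one (port, direction) binding; shared blocks with
 * multiple bindings are rejected with -EOPNOTSUPP.
 */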
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_acl_block *block,
				  struct net_device *out_dev)
{
	struct mlxsw_sp_acl_block_binding *binding;
	struct mlxsw_sp_port *out_port;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list))
		return -EOPNOTSUPP;

	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_acl_block_binding, list);
	in_port = binding->mlxsw_sp_port;
	if (!mlxsw_sp_port_dev_check(out_dev))
		return -EINVAL;

	out_port = netdev_priv(out_dev);
	if (out_port->mlxsw_sp != mlxsw_sp)
		return -EINVAL;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_port->local_port,
					     binding->ingress);
}

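/* Only VLAN modify is offloaded. The ethertype value passed down appears
 * to be an index into the device's EtherType table (0 and 1 are assumed
 * here to select 802.1Q and 802.1AD respectively), not the EtherType
 * itself.
 */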
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype);
	} else {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      &rulei->counter_index);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}

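/* Create a rule keyed by the TC cookie within the given ruleset. The
 * ruleset reference count is bumped so the ruleset outlives its rules.
 */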
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	ruleset->ht_key.block->rule_count++;
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ruleset->ht_key.block->rule_count--;
	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

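/* Poll the hardware activity bit of a rule and refresh its last_used
 * timestamp when the rule has matched packets since the previous poll.
 */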
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

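/* Return the packet and byte counts accumulated since the previous call
 * (the hardware counter is free-running, so deltas are computed against
 * the values cached in the rule), together with the last-used timestamp
 * maintained by the activity work.
 */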
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

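/* Initialize the ACL infrastructure for the ASIC: the flexible key
 * registry sized from the ACL_FLEX_KEYS resource, the ruleset hash table,
 * a dummy FID used by ACL actions, the backend (TCAM) state, and the
 * periodic rule activity polling work.
 */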
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for periodic rule activity updates */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}