// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

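/* Top-level ACL state, allocated once per ASIC instance. It owns the
 * flexible-key registry (afk), the hash table of rulesets, the global
 * list of rules that is periodically polled for activity, and the TCAM
 * state that is appended past the end of the structure.
 */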
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl)
{
	return &acl->tcam;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	refcount_t ref_count;
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];
	/* priv must always be the last member */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	u64 last_drops;
	unsigned long priv[];
	/* priv must always be the last member */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

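/* A caller of this helper holds a reference of its own on the ruleset,
 * so a reference count of two means there is exactly one other user.
 * At the call sites (mlxsw_sp_acl_rule_add() and mlxsw_sp_acl_rule_del())
 * that other user is a single rule, hence "singular".
 */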
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return refcount_read(&ruleset->ref_count) == 2;
}

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

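/* Bind the ruleset to every port binding on the block. If any binding
 * fails, the bindings already made are unwound in reverse order.
 */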
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

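/* Allocate a ruleset together with the profile-private area that follows
 * it, add it to the TCAM through the profile ops, and make it discoverable
 * through the ruleset hash table. The error labels unwind these steps in
 * reverse order.
 */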
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&ruleset->ref_count, 1);
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	refcount_inc(&ruleset->ref_count);
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (!refcount_dec_and_test(&ruleset->ref_count))
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

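/* Get-or-create entry point used by the flow offload code: an existing
 * ruleset for the (block, chain, profile) key is referenced and returned,
 * otherwise a new one is created. A typical caller pairs it with
 * mlxsw_sp_acl_ruleset_put(), roughly (hypothetical sketch, not a
 * verbatim caller):
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, chain_index,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
 *	if (IS_ERR(ruleset))
 *		return PTR_ERR(ruleset);
 *	...
 *	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 */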
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}

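/* Create the rule info. When the caller supplies an existing action block
 * (afa_block), the rule info only borrows it: action_created stays clear,
 * so mlxsw_sp_acl_rulei_destroy() will not free the borrowed block.
 */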
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	if (rulei->src_port_range_reg_valid)
		mlxsw_sp_port_range_reg_put(mlxsw_sp,
					    rulei->src_port_range_reg_index);
	if (rulei->dst_port_range_reg_valid)
		mlxsw_sp_port_range_reg_put(mlxsw_sp,
					    rulei->dst_port_range_reg_index);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

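/* Append a forward action. A NULL out_dev is not an error: it selects
 * forwarding back to the ingress port, encoded as in_port = true with
 * local_port set to zero.
 */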
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

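/* Mirroring takes its source port from the block binding, so it can only
 * be offloaded while the block is bound to exactly one port.
 */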
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)

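/* Worked example of how an entry below is matched: a pedit of the IPv4
 * dsfield arrives as htype FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset 0 and
 * mask 0xff00ffff. The mask has bits 16..23 clear, i.e. only the second
 * byte of the first 32-bit header word (the ToS byte) is modified, and
 * shift 16 moves the new value from those bits down to bit 0 before it
 * is handed to the per-field handler.
 */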
static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0,  IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0,  IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

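/* On Spectrum-2 and later, IPv6 address rewrite is programmed 64 bits at
 * a time, while pedit hands the address over as four separate 32-bit
 * words. The odd words (SIP_1/SIP_3/DIP_1/DIP_3) are therefore only
 * cached in the rule info here; the action is emitted once the matching
 * even word arrives.
 */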
static int
mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
					    enum mlxsw_sp_acl_mangle_field field,
					    u32 val, struct netlink_ext_ack *extack)
{
	if (!rulei->ipv6_valid) {
		rulei->ipv6.prev_val = val;
		rulei->ipv6_valid = true;
		rulei->ipv6.prev_field = field;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
	return -EOPNOTSUPP;
}

static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
	/* IPv4 fields */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, false,
						 true, val, 0, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
		return mlxsw_afa_block_append_ip(rulei->act_block, true,
						 true, val, 0, extack);
	/* IPv6 fields */
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
		return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
								   mact->field,
								   val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 false, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, false, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
		if (rulei->ipv6_valid &&
		    rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
			rulei->ipv6_valid = false;
			return mlxsw_afa_block_append_ip(rulei->act_block,
							 true, true, val,
							 rulei->ipv6.prev_val,
							 extack);
		}
		break;
	default:
		break;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return acl_rulei_ops->act_mangle_field(mlxsw_sp,
							       rulei, mact,
							       val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  u32 index, u64 rate_bytes_ps,
				  u32 burst, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_police(rulei->act_block, index,
					    rate_bytes_ps, burst,
					    &rulei->policer_index, extack);
	if (err)
		return err;

	rulei->policer_index_valid = true;

	return 0;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  bool disable_learning, bool disable_security)
{
	return mlxsw_afa_block_append_ignore(rulei->act_block,
					     disable_learning,
					     disable_security);
}

int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct psample_group *psample_group, u32 rate,
				  u32 trunc_size, bool truncate,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	mlxsw_sp_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_sampler(rulei->act_block,
					      mlxsw_sp_port->local_port,
					      psample_group, rate, trunc_size,
					      truncate, binding->ingress,
					      extack);
}

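/* Rule lifetime: mlxsw_sp_acl_rule_create() allocates the rule and its
 * rule info, mlxsw_sp_acl_rule_add() programs it into hardware and makes
 * it visible for lookup, and mlxsw_sp_acl_rule_del() and
 * mlxsw_sp_acl_rule_destroy() undo the two steps in reverse.
 */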
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be bound directly to the device; the remaining
		 * rulesets are reached through "goto" (jump) actions.
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

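/* Rule activity is polled from a self-rescheduling delayed work item:
 * every interval the work walks the global rule list, asks the hardware
 * which rules saw traffic and refreshes their last_used timestamps, then
 * schedules itself again.
 */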
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

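/* Return statistics as deltas relative to the previous call: the counter
 * and policer readings from hardware are cumulative, so the values cached
 * in the rule from the last call are subtracted and then refreshed.
 */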
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *drops,
				u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)
{
	enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	u64 current_drops = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	if (rulei->policer_index_valid) {
		err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
							 rulei->policer_index,
							 &current_drops);
		if (err)
			return err;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*drops = current_drops - rule->last_drops;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;
	rule->last_drops = current_drops;

	return 0;
}

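/* Initialize the ACL subsystem in dependency order: flexible keys,
 * ruleset hash table, dummy FID, rule list and lock, TCAM, and finally
 * the periodic activity update work. mlxsw_sp_acl_fini() below tears the
 * same state down in reverse.
 */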
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};

struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
};