// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

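/* Per-ASIC ACL state, allocated in mlxsw_sp_acl_init() and reachable through
 * mlxsw_sp->acl. Rulesets live in ruleset_ht, and every offloaded rule is
 * linked on the rules list (under rules_lock) so the delayed work below can
 * periodically poll its hardware activity.
 */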
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];
	/* priv must always be the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[];
	/* priv must always be the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

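/* Bind ruleset zero to every port binding currently on the flow block. If one
 * of the binds fails, the bindings done so far are unwound in reverse order
 * and ruleset_zero is cleared again.
 */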
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

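/* Create a ruleset for a (block, chain, profile) key: allocate it together
 * with the profile's private area, initialize the per-ruleset rule hashtable,
 * add the ruleset to the TCAM through the profile ops and finally make it
 * discoverable via acl->ruleset_ht.
 */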
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

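/* The lookup key is zeroed first so that any structure padding is in a known
 * state; rhashtable compares fixed-length keys as raw bytes.
 */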
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

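/* Get-or-create semantics: an existing ruleset for the same key only gets its
 * reference count bumped, otherwise a new one is created with a reference
 * count of 1. mlxsw_sp_acl_ruleset_put() drops the reference and the ruleset
 * is destroyed once the count reaches zero.
 */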
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}

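/* If the caller already owns an action block (afa_block != NULL), it is used
 * as-is and is not freed on destroy; otherwise a fresh block is created here
 * and action_created marks it as owned by the rule info.
 */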
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants the packet
		 * forwarded to the ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

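/* The VLAN modify action takes a small EtherType index rather than the raw
 * protocol value; the mapping below uses 0 for 802.1Q and 1 for 802.1ad.
 */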
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

enum mlxsw_sp_acl_mangle_field {
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
};

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,       \
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
				   _offset, _mask, _shift, _field)

static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
};

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	}

	/* We shouldn't have gotten a match in the first place! */
	WARN_ONCE(1, "Unhandled mangle field");
	return -EINVAL;
}

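/* Match the pedit triple (htype, offset, mask) against the table above. For
 * example, a rewrite of the IPv4 DS field is expected as htype IP4, offset 0
 * and mask 0xff00ffff, with the new value in bits 16..23 of 'val'; the value
 * is shifted down by mact->shift before being programmed as a QoS action.
 */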
int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
								   rulei, mact,
								   val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

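/* Adding a rule: program it through the profile ops, index it by cookie in
 * the ruleset's rule hashtable, bind the block when the first rule of the
 * implicit chain 0 ruleset appears, link the rule for activity tracking and
 * update the block's rule counters.
 */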
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be directly bound to the device. The rest of
		 * the rulesets are reached through the "goto" action.
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

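/* Rule activity is polled by a delayed work item every
 * MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS milliseconds; a rule reported
 * as active by the hardware gets its last_used timestamp refreshed.
 */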
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

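/* Statistics are returned as deltas: the counter is read, the difference from
 * the previously reported values is handed back and the fresh readings are
 * cached in the rule for the next call.
 */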
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

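/* Initialization order: allocate the ACL state together with the TCAM private
 * area, create the flex key infrastructure, the ruleset hashtable and the
 * dummy FID, initialize the TCAM and finally schedule the first rule activity
 * update.
 */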
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}