xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c (revision 8631f940b81bf0da3d375fce166d381fa8c47bb2)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/slab.h>
6 #include <linux/errno.h>
7 #include <linux/bitops.h>
8 #include <linux/list.h>
9 #include <linux/rhashtable.h>
10 #include <linux/netdevice.h>
11 
12 #include "reg.h"
13 #include "core.h"
14 #include "resources.h"
15 #include "spectrum.h"
16 #include "spectrum_acl_tcam.h"
17 #include "core_acl_flex_keys.h"
18 
19 size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
20 {
21 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
22 
23 	return ops->priv_size;
24 }
25 
/* Initialize TCAM bookkeeping: size the region and group ID bitmaps from
 * the device's advertised resources, then let the chip-specific ops set up
 * their private state. Returns 0 on success or a negative errno; on failure
 * all allocations made here are released.
 */
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	/* Bitmap of in-use region IDs, one bit per possible region. */
	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	/* Bitmap of in-use ACL group IDs. */
	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						 ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
73 
74 void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
75 			    struct mlxsw_sp_acl_tcam *tcam)
76 {
77 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
78 
79 	ops->fini(mlxsw_sp, tcam->priv);
80 	kfree(tcam->used_groups);
81 	kfree(tcam->used_regions);
82 }
83 
84 int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
85 				   struct mlxsw_sp_acl_rule_info *rulei,
86 				   u32 *priority, bool fillup_priority)
87 {
88 	u64 max_priority;
89 
90 	if (!fillup_priority) {
91 		*priority = 0;
92 		return 0;
93 	}
94 
95 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
96 		return -EIO;
97 
98 	/* Priority range is 1..cap_kvd_size-1. */
99 	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
100 	if (rulei->priority >= max_priority)
101 		return -EINVAL;
102 
103 	/* Unlike in TC, in HW, higher number means higher priority. */
104 	*priority = max_priority - rulei->priority;
105 	return 0;
106 }
107 
108 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
109 					   u16 *p_id)
110 {
111 	u16 id;
112 
113 	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
114 	if (id < tcam->max_regions) {
115 		__set_bit(id, tcam->used_regions);
116 		*p_id = id;
117 		return 0;
118 	}
119 	return -ENOBUFS;
120 }
121 
/* Return a region ID to the allocator bitmap. */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
127 
128 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
129 					  u16 *p_id)
130 {
131 	u16 id;
132 
133 	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
134 	if (id < tcam->max_groups) {
135 		__set_bit(id, tcam->used_groups);
136 		*p_id = id;
137 		return 0;
138 	}
139 	return -ENOBUFS;
140 }
141 
/* Return an ACL group ID to the allocator bitmap. */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
147 
/* A pattern is a predefined set of flexible-key elements that a region
 * may be created with, used to over-provision regions so later rules fit.
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
152 
/* A group of TCAM regions sharing one HW ACL group ID. Regions are kept
 * in region_list ordered by priority; chunks belonging to the group are
 * looked up by priority via chunk_ht.
 */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id; /* HW ACL group ID programmed via PAGT/PPBT */
	struct list_head region_list;
	unsigned int region_count;
	struct rhashtable chunk_ht;
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage; /* valid iff tmplt_elusage_set */
};
165 
166 struct mlxsw_sp_acl_tcam_chunk {
167 	struct list_head list; /* Member of a TCAM region */
168 	struct rhash_head ht_node; /* Member of a chunk HT */
169 	unsigned int priority; /* Priority within the region and group */
170 	struct mlxsw_sp_acl_tcam_group *group;
171 	struct mlxsw_sp_acl_tcam_region *region;
172 	unsigned int ref_count;
173 	unsigned long priv[0];
174 	/* priv has to be always the last item */
175 };
176 
/* One TCAM rule instance; points back to the chunk it was added under. */
struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	/* C99 flexible array member instead of the deprecated GNU [0]
	 * zero-length array; allocation size is unchanged.
	 */
	unsigned long priv[];
	/* priv has to be always the last item */
};
182 
/* Chunks within a group are hashed by their (unsigned int) priority. */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};
189 
/* Re-program the group's ordered region list into HW via the PAGT
 * register. The order in which regions are packed defines their lookup
 * precedence, so region_list must already be sorted by priority.
 */
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
203 
/* Create a new (initially empty) group: record the patterns and optional
 * template element usage, allocate a HW group ID and set up the chunk
 * hashtable. Returns 0 on success or a negative errno with everything
 * rolled back.
 */
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	if (tmplt_elusage) {
		/* Caller provided a template; regions in this group will use
		 * it instead of pattern lookup.
		 */
		group->tmplt_elusage_set = true;
		memcpy(&group->tmplt_elusage, tmplt_elusage,
		       sizeof(group->tmplt_elusage));
	}
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}
238 
239 static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
240 					struct mlxsw_sp_acl_tcam_group *group)
241 {
242 	struct mlxsw_sp_acl_tcam *tcam = group->tcam;
243 
244 	rhashtable_destroy(&group->chunk_ht);
245 	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
246 	WARN_ON(!list_empty(&group->region_list));
247 }
248 
/* Bind the group to a port's ingress or egress ACL lookup point via the
 * PPBT register. Returns a negative errno on register write failure.
 */
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
263 
/* Unbind the group from a port's ingress/egress ACL lookup point.
 * The PPBT write result is intentionally ignored: this runs on teardown
 * paths where there is no sensible recovery.
 */
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
278 
/* Accessor for the group's HW ACL group ID. */
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}
284 
285 static unsigned int
286 mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
287 {
288 	struct mlxsw_sp_acl_tcam_chunk *chunk;
289 
290 	if (list_empty(&region->chunk_list))
291 		return 0;
292 	/* As a priority of a region, return priority of the first chunk */
293 	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
294 	return chunk->priority;
295 }
296 
297 static unsigned int
298 mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
299 {
300 	struct mlxsw_sp_acl_tcam_chunk *chunk;
301 
302 	if (list_empty(&region->chunk_list))
303 		return 0;
304 	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
305 	return chunk->priority;
306 }
307 
/* Insert a region into the group's list, keeping the list sorted by
 * region priority (ascending). The position determines HW lookup order
 * once mlxsw_sp_acl_tcam_group_update() is called.
 */
static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;

	/* Position the region inside the list according to priority */
	list_for_each(pos, &group->region_list) {
		region2 = list_entry(pos, typeof(*region2), list);
		if (mlxsw_sp_acl_tcam_region_prio(region2) >
		    mlxsw_sp_acl_tcam_region_prio(region))
			break;
	}
	/* Insert before the first region with a higher priority (or at the
	 * tail when none exists).
	 */
	list_add_tail(&region->list, pos);
	group->region_count++;
}
325 
326 static void
327 mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
328 				 struct mlxsw_sp_acl_tcam_region *region)
329 {
330 	group->region_count--;
331 	list_del(&region->list);
332 }
333 
/* Attach a region to a group: add it to the sorted list and push the new
 * region order to HW. On HW failure the list change is rolled back and
 * the previous order re-programmed. Returns -ENOBUFS when the group is
 * already at its device-imposed size limit.
 */
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	/* Best-effort restore of the previous HW region order. */
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}
358 
359 static void
360 mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
361 				      struct mlxsw_sp_acl_tcam_region *region)
362 {
363 	struct mlxsw_sp_acl_tcam_group *group = region->group;
364 
365 	mlxsw_sp_acl_tcam_group_list_del(group, region);
366 	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
367 }
368 
/* Find an existing region in the group that can host a chunk of the given
 * priority and element usage. On success *p_need_split says whether the
 * region would have to be split to take the chunk (its key does not cover
 * elusage). Returns NULL when a brand new region must be created instead.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}
421 
/* Pick the element usage a new region should be created with: the group
 * template when one was supplied, otherwise the first predefined pattern
 * that covers the requested elusage, falling back to the requested
 * elusage itself when no pattern matches.
 */
static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (group->tmplt_elusage_set) {
		memcpy(out, &group->tmplt_elusage, sizeof(*out));
		/* The template is expected to cover every requested element;
		 * a violation indicates a caller bug.
		 */
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
448 
/* Allocate the region in HW via the PTAR register, programming the
 * flexible-key block encodings derived from the region's key info, and
 * read back the opaque tcam_region_info HW handle.
 */
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	/* HW returns the region info handle used by subsequent registers. */
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}
476 
/* Free the HW region via PTAR. Write status is ignored: this runs on
 * teardown/error paths where no recovery is possible.
 */
static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
488 
/* Enable lookups in the region via the PACL register. */
static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
499 
/* Disable lookups in the region via PACL; write status ignored on this
 * teardown path.
 */
static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
510 
/* Create a region able to match the given element usage: build the key
 * info, reserve an ID, allocate and enable the region in HW and run the
 * flavor-specific init. Returns the new region or ERR_PTR(); on error
 * every completed step is unwound in reverse order.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	/* Flavor-specific private data trails the region struct. */
	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	kfree(region);
	return ERR_PTR(err);
}
569 
/* Destroy a region, unwinding region_create in reverse: flavor fini,
 * disable, HW free, ID and key-info release, then the struct itself.
 */
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	kfree(region);
}
583 
/* Associate a chunk with a region in the group: reuse an existing region
 * whose key covers elusage at the right priority position, or create a
 * new one. Returns -EOPNOTSUPP when the only option would be to split an
 * existing region, which is not implemented.
 */
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		/* Over-provision the new region per pattern/template. */
		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}
635 
636 static void
637 mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
638 				struct mlxsw_sp_acl_tcam_chunk *chunk)
639 {
640 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
641 
642 	list_del(&chunk->list);
643 	if (list_empty(&region->chunk_list)) {
644 		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
645 		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
646 	}
647 }
648 
/* Create a chunk for the given priority (which must not be the reserved
 * catch-all priority), place it in a region, run the flavor-specific
 * chunk init and insert it into the group's hashtable. Returns the chunk
 * with ref_count of 1 or ERR_PTR().
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	/* Flavor-specific private data trails the chunk struct. */
	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	ops->chunk_init(chunk->region->priv, chunk->priv, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}
690 
/* Destroy a chunk, unwinding chunk_create in reverse: hashtable removal,
 * flavor fini, region deassociation, then free.
 */
static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}
704 
/* Look up the chunk for the given priority in the group, taking a
 * reference, or create a new one. An existing chunk whose region does not
 * cover elusage indicates a caller bug and yields -EINVAL.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
	if (chunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		chunk->ref_count++;
		return chunk;
	}
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
					      priority, elusage);
}
725 
726 static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
727 					struct mlxsw_sp_acl_tcam_chunk *chunk)
728 {
729 	if (--chunk->ref_count)
730 		return;
731 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
732 }
733 
734 static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
735 {
736 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
737 
738 	return ops->entry_priv_size;
739 }
740 
/* Add a rule entry: take (or create) the chunk matching the rule's
 * priority, then write the entry through the flavor-specific op. The
 * chunk reference is dropped again if the HW add fails.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;

	err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
			     entry->priv, rulei);
	if (err)
		goto err_entry_add;
	entry->chunk = chunk;

	return 0;

err_entry_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}
770 
771 static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
772 					struct mlxsw_sp_acl_tcam_entry *entry)
773 {
774 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
775 	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
776 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
777 
778 	ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
779 	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
780 }
781 
782 static int
783 mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
784 				       struct mlxsw_sp_acl_tcam_group *group,
785 				       struct mlxsw_sp_acl_tcam_entry *entry,
786 				       struct mlxsw_sp_acl_rule_info *rulei)
787 {
788 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
789 	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
790 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
791 
792 	return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv,
793 					 entry->priv, rulei);
794 }
795 
796 static int
797 mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
798 				     struct mlxsw_sp_acl_tcam_entry *entry,
799 				     bool *activity)
800 {
801 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
802 	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
803 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
804 
805 	return ops->entry_activity_get(mlxsw_sp, region->priv,
806 				       entry->priv, activity);
807 }
808 
/* Elements a region created from the IPv4 pattern can match on. */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};
828 
/* Elements a region created from the IPv6 pattern can match on. */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
843 
/* Patterns tried in order when sizing a new region's key; the first one
 * covering the requested element usage wins.
 */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};
854 
/* Number of predefined region patterns above. */
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
857 
/* Flower ruleset private data: one TCAM group per ruleset. */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};
861 
/* Flower rule private data: a single TCAM entry. */
struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
865 
/* Multicast router ruleset private data: a TCAM group plus one chunk
 * held up-front so the group exists in HW before the MR binds to it.
 */
struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_group group;
};
870 
/* Multicast router rule private data: a single TCAM entry. */
struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
874 
875 static int
876 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
877 				     struct mlxsw_sp_acl_tcam *tcam,
878 				     void *ruleset_priv,
879 				     struct mlxsw_afk_element_usage *tmplt_elusage)
880 {
881 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
882 
883 	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
884 					   mlxsw_sp_acl_tcam_patterns,
885 					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
886 					   tmplt_elusage);
887 }
888 
889 static void
890 mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
891 				     void *ruleset_priv)
892 {
893 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
894 
895 	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
896 }
897 
898 static int
899 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
900 				      void *ruleset_priv,
901 				      struct mlxsw_sp_port *mlxsw_sp_port,
902 				      bool ingress)
903 {
904 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
905 
906 	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
907 					    mlxsw_sp_port, ingress);
908 }
909 
910 static void
911 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
912 					void *ruleset_priv,
913 					struct mlxsw_sp_port *mlxsw_sp_port,
914 					bool ingress)
915 {
916 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
917 
918 	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
919 				       mlxsw_sp_port, ingress);
920 }
921 
/* Report the HW group ID backing the flower ruleset. */
static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
929 
930 static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
931 {
932 	return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
933 	       mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
934 }
935 
936 static int
937 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
938 				  void *ruleset_priv, void *rule_priv,
939 				  struct mlxsw_sp_acl_rule_info *rulei)
940 {
941 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
942 	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
943 
944 	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
945 					   &rule->entry, rulei);
946 }
947 
948 static void
949 mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
950 {
951 	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
952 
953 	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
954 }
955 
/* In-place action replacement is not implemented for flower rules. */
static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *ruleset_priv,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}
964 
965 static int
966 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
967 					   void *rule_priv, bool *activity)
968 {
969 	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
970 
971 	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
972 						    activity);
973 }
974 
/* Profile ops wiring the generic ACL layer to the flower-flavored TCAM
 * implementation above.
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= mlxsw_sp_acl_tcam_flower_rule_priv_size,
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
988 
989 static int
990 mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
991 				 struct mlxsw_sp_acl_tcam *tcam,
992 				 void *ruleset_priv,
993 				 struct mlxsw_afk_element_usage *tmplt_elusage)
994 {
995 	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
996 	int err;
997 
998 	err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
999 					  mlxsw_sp_acl_tcam_patterns,
1000 					  MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1001 					  tmplt_elusage);
1002 	if (err)
1003 		return err;
1004 
1005 	/* For most of the TCAM clients it would make sense to take a tcam chunk
1006 	 * only when the first rule is written. This is not the case for
1007 	 * multicast router as it is required to bind the multicast router to a
1008 	 * specific ACL Group ID which must exist in HW before multicast router
1009 	 * is initialized.
1010 	 */
1011 	ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group,
1012 						     1, tmplt_elusage);
1013 	if (IS_ERR(ruleset->chunk)) {
1014 		err = PTR_ERR(ruleset->chunk);
1015 		goto err_chunk_get;
1016 	}
1017 
1018 	return 0;
1019 
1020 err_chunk_get:
1021 	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
1022 	return err;
1023 }
1024 
1025 static void
1026 mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
1027 {
1028 	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1029 
1030 	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk);
1031 	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
1032 }
1033 
/* No-op bind for the multicast-router client; always succeeds. */
static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}
1042 
/* No-op unbind, mirroring mlxsw_sp_acl_tcam_mr_ruleset_bind(): the
 * multicast router manages its own binding lifecycle.
 */
static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}
1050 
1051 static u16
1052 mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
1053 {
1054 	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1055 
1056 	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
1057 }
1058 
1059 static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
1060 {
1061 	return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) +
1062 	       mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
1063 }
1064 
1065 static int
1066 mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1067 			      void *rule_priv,
1068 			      struct mlxsw_sp_acl_rule_info *rulei)
1069 {
1070 	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1071 	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1072 
1073 	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
1074 					   &rule->entry, rulei);
1075 }
1076 
1077 static void
1078 mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1079 {
1080 	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1081 
1082 	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
1083 }
1084 
1085 static int
1086 mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1087 					 void *ruleset_priv, void *rule_priv,
1088 					 struct mlxsw_sp_acl_rule_info *rulei)
1089 {
1090 	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1091 	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1092 
1093 	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group,
1094 						      &rule->entry, rulei);
1095 }
1096 
1097 static int
1098 mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1099 				       void *rule_priv, bool *activity)
1100 {
1101 	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1102 
1103 	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
1104 						    activity);
1105 }
1106 
/* Profile ops for the multicast-router TCAM client. Unlike the flower
 * profile, this one supports rule_action_replace, and its bind/unbind
 * callbacks are no-ops.
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size		= mlxsw_sp_acl_tcam_mr_rule_priv_size,
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
};
1120 
/* Lookup table mapping each mlxsw_sp_acl_profile value to its ops; entries
 * not listed here are implicitly NULL and rejected by profile_ops().
 */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};
1126 
1127 const struct mlxsw_sp_acl_profile_ops *
1128 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1129 			      enum mlxsw_sp_acl_profile profile)
1130 {
1131 	const struct mlxsw_sp_acl_profile_ops *ops;
1132 
1133 	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1134 		return NULL;
1135 	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1136 	if (WARN_ON(!ops))
1137 		return NULL;
1138 	return ops;
1139 }
1140