/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"

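/* Top-level TCAM bookkeeping. Region and group IDs are handed out from
 * two bitmaps that are sized according to the resource limits queried
 * from the device at init time.
 */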
struct mlxsw_sp_acl_tcam {
	unsigned long *used_regions; /* bit array */
	unsigned int max_regions;
	unsigned long *used_groups;  /* bit array */
	unsigned int max_groups;
	unsigned int max_group_size;
};

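/* Size the ID bitmaps from the device resources. Since a 1:1 mapping
 * between ACL regions and TCAM regions is used, the effective number
 * of regions is the smaller of the two limits.
 */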
static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	return 0;

err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;

	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

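/* First-fit ID allocators for regions and groups. The _get() helpers
 * find the first clear bit, mark it used and return its index; the
 * _put() helpers clear the bit again. There is no locking here;
 * callers are expected to serialize access (presumably by holding
 * RTNL, as the surrounding ACL code does).
 */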
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

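/* A pattern lists the flex-key elements a region created from it will
 * be able to match on. A rule is mapped to the first pattern whose
 * element set is a superset of what the rule actually uses; see
 * mlxsw_sp_acl_tcam_group_use_patterns() below.
 */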
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list;
	unsigned int region_count;
	struct rhashtable chunk_ht;
	struct {
		u16 local_port;
		bool ingress;
	} bound;
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
};

struct mlxsw_sp_acl_tcam_region {
	struct list_head list; /* Member of a TCAM group */
	struct list_head chunk_list; /* List of chunks under this region */
	struct parman *parman;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_acl_tcam_group *group;
	u16 id; /* ACL ID and region ID - they are the same */
	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
	struct mlxsw_afk_key_info *key_info;
	struct {
		struct parman_prio parman_prio;
		struct parman_item parman_item;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};

struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct parman_prio parman_prio;
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct parman_item parman_item;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
};

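/* Chunks within a group are hashed by priority, so rule insertion can
 * cheaply find an existing chunk to share for a given priority.
 */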
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};

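/* Rewrite the group's ACL list in hardware. The PAGT register takes
 * the region (ACL) IDs in lookup order, which is exactly the
 * priority-sorted order maintained in region_list.
 */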
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
err_group_update:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	if (!mlxsw_sp_port_dev_check(dev))
		return -EINVAL;

	mlxsw_sp_port = netdev_priv(dev);
	group->bound.local_port = mlxsw_sp_port->local_port;
	group->bound.ingress = ingress;
	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	/* Use the priority of the first chunk as the region's priority. */
	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}

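/* Keep region_list sorted by the priority of each region's first
 * chunk, so that mlxsw_sp_acl_tcam_group_update() programs the lookups
 * in the correct order.
 */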
static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;

	/* Position the region inside the list according to priority */
	list_for_each(pos, &group->region_list) {
		region2 = list_entry(pos, typeof(*region2), list);
		if (mlxsw_sp_acl_tcam_region_prio(region2) >
		    mlxsw_sp_acl_tcam_region_prio(region))
			break;
	}
	list_add_tail(&region->list, pos);
	group->region_count++;
}

static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	group->region_count--;
	list_del(&region->list);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check whether the requested priority does not
		 * rather belong under one of the subsequent regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than that of the currently inspected
		 * region, we cannot use this region, so return NULL to
		 * indicate a new region has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than that of the currently inspected
		 * region, we cannot use this region either. The next
		 * region might still fit, so let it be processed and
		 * eventually break at the check right above this one.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}

static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

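/* Regions are allocated with a small base size and are grown on demand
 * in fixed steps by parman; see the parman ops further below.
 */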
#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region,
				u16 new_size)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
			    new_size, region->id, region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

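/* Write a single TCAM entry. The key and mask are encoded according to
 * the region's key info. Only the first flexible action set is stored
 * inline in the entry; any remaining sets reside in the KVD and are
 * reached from the first one.
 */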
static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset,
				      struct mlxsw_sp_acl_rule_info *rulei)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	char *mask;
	char *key;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_region *region,
					    unsigned int offset,
					    bool *activity)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	int err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
			     region->tcam_region_info, offset);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
	if (err)
		return err;
	*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
	return 0;
}

#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)

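/* Each region is terminated by a catch-all entry at the lowest
 * possible priority (~0U), carrying a plain "continue" action, so that
 * packets which match no rule in the region continue through the rest
 * of the lookup pipeline instead of being subject to the device's
 * default miss behavior.
 */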
static int
mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	parman_prio_init(region->parman, parman_prio,
			 MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
	err = parman_item_add(region->parman, parman_prio, parman_item);
	if (err)
		goto err_parman_item_add;

	rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rulei)) {
		err = PTR_ERR(rulei);
		goto err_rulei_create;
	}

	mlxsw_sp_acl_rulei_act_continue(rulei);
	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    parman_item->index, rulei);
	if (err)
		goto err_rule_insert;
	region->catchall.rulei = rulei;

	return 0;

err_rule_insert:
err_rulei_commit:
	mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
	parman_item_remove(region->parman, parman_prio, parman_item);
err_parman_item_add:
	parman_prio_fini(parman_prio);
	return err;
}

static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      parman_item->index);
	mlxsw_sp_acl_rulei_destroy(rulei);
	parman_item_remove(region->parman, parman_prio, parman_item);
	parman_prio_fini(parman_prio);
}

static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region,
			      u16 src_offset, u16 dst_offset, u16 size)
{
	char prcr_pl[MLXSW_REG_PRCR_LEN];

	mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
			    region->tcam_region_info, src_offset,
			    region->tcam_region_info, dst_offset, size);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}

static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
						  unsigned long new_count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}

static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
						 unsigned long from_index,
						 unsigned long to_index,
						 unsigned long count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

	mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
				      from_index, to_index, count);
}

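/* Linear-sort parman is used: items are kept sorted by priority in one
 * contiguous array and are shifted on insertion, which maps directly
 * to the PRCR move operation implemented above.
 */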
static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_acl_tcam_region_parman_resize,
	.move		= mlxsw_sp_acl_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
				       region);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_catchall_add;

	return region;

err_tcam_region_catchall_add:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	parman_destroy(region->parman);
err_parman_create:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	parman_destroy(region->parman);
	kfree(region);
}

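/* Associate a chunk with a region: either an existing region found by
 * priority and element usage, or a newly created one. New regions are
 * created with the element usage widened to the first matching
 * pattern, so that later chunks with similar keys can share them.
 */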
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We would need to split
		 * the existing region into two and create a new region
		 * for this chunk in between. That is currently not
		 * supported.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}

static void
mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	list_del(&chunk->list);
	if (list_empty(&region->chunk_list)) {
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	}
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}

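/* Chunks are reference counted and shared by all rules of the same
 * priority within a group; _put() destroys the chunk (and possibly its
 * region) once the last rule is gone.
 */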
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
	if (chunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		chunk->ref_count++;
		return chunk;
	}
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
					      priority, elusage);
}

static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	if (--chunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}

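/* Adding an entry: find or create the chunk for the rule's priority,
 * let parman allocate a slot within the chunk's priority block, then
 * write the encoded key, mask and action set at that index.
 */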
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;
	err = parman_item_add(region->parman, &chunk->parman_prio,
			      &entry->parman_item);
	if (err)
		goto err_parman_item_add;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    entry->parman_item.index,
						    rulei);
	if (err)
		goto err_rule_insert;
	entry->chunk = chunk;

	return 0;

err_rule_insert:
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
err_parman_item_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}

static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      entry->parman_item.index);
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
							   entry->parman_item.index,
							   activity);
}

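/* Element patterns offered to flower offload. A rule using any subset
 * of one of these sets can share a region created from that pattern.
 */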
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC,
	MLXSW_AFK_ELEMENT_SMAC,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP4,
	MLXSW_AFK_ELEMENT_DST_IP4,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_IP6_HI,
	MLXSW_AFK_ELEMENT_DST_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

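/* Flower profile glue: a ruleset is backed by one TCAM group and a
 * rule by one TCAM entry. These thin wrappers adapt the generic TCAM
 * code above to the mlxsw_sp_acl_profile_ops interface.
 */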
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     void *priv, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam *tcam = priv;

	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
					    dev, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
					   &rule->entry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
						    activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}

const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
	.priv_size		= sizeof(struct mlxsw_sp_acl_tcam),
	.init			= mlxsw_sp_acl_tcam_init,
	.fini			= mlxsw_sp_acl_tcam_fini,
	.profile_ops		= mlxsw_sp_acl_tcam_profile_ops,
};