/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"

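/* Top-level TCAM state. Region and group IDs are allocated from simple
 * bitmaps whose sizes are taken from the device resources queried at
 * init time.
 */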
struct mlxsw_sp_acl_tcam {
	unsigned long *used_regions; /* bit array */
	unsigned int max_regions;
	unsigned long *used_groups;  /* bit array */
	unsigned int max_groups;
	unsigned int max_group_size;
};

static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	return 0;

err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;

	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

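/* A pattern is a predefined superset of key elements. When a chunk cannot
 * be placed into any existing region, mlxsw_sp_acl_tcam_group_use_patterns()
 * picks the first pattern that covers the requested element usage and sizes
 * the new region's key after it, so that later rules whose keys are a
 * subset of the pattern can share the region instead of forcing a new one.
 */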
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list;
	unsigned int region_count;
	struct rhashtable chunk_ht;
	struct {
		u16 local_port;
		bool ingress;
	} bound;
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
};

struct mlxsw_sp_acl_tcam_region {
	struct list_head list; /* Member of a TCAM group */
	struct list_head chunk_list; /* List of chunks under this region */
	struct parman *parman;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_acl_tcam_group *group;
	u16 id; /* ACL ID and region ID - they are the same */
	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
	struct mlxsw_afk_key_info *key_info;
	struct {
		struct parman_prio parman_prio;
		struct parman_item parman_item;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};

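/* A chunk represents all entries sharing the same priority within a group.
 * Each chunk lives in exactly one region and is reference-counted; it maps
 * the logical priority onto a parman priority band inside that region.
 */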
struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct parman_prio parman_prio;
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct parman_item parman_item;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};

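/* Sync the group's ordered region list to the device via the PAGT register.
 * The driver keeps the list sorted by rule priority; the device presumably
 * evaluates the regions in the order in which they are packed here.
 */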
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
err_group_update:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

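/* Bind the group to a port. The PPBT register attaches the group to either
 * the ingress or the egress ACL binding point of the given local port; the
 * binding is remembered in group->bound so that unbind can undo it later.
 */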
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	if (!mlxsw_sp_port_dev_check(dev))
		return -EINVAL;

	mlxsw_sp_port = netdev_priv(dev);
	group->bound.local_port = mlxsw_sp_port->local_port;
	group->bound.ingress = ingress;
	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

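/* The priority span of a region is derived from its chunks: the first chunk
 * in the chunk list provides the region's priority and the last chunk its
 * maximum priority. Note that chunks are appended to the tail on creation,
 * so this appears to rely on chunks being created in priority order.
 */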
static unsigned int
mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	/* As the priority of a region, return the priority of its first chunk */
	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}

static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;

	/* Position the region inside the list according to priority */
	list_for_each(pos, &group->region_list) {
		region2 = list_entry(pos, typeof(*region2), list);
		if (mlxsw_sp_acl_tcam_region_prio(region2) >
		    mlxsw_sp_acl_tcam_region_prio(region))
			break;
	}
	list_add_tail(&region->list, pos);
	group->region_count++;
}

static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	group->region_count--;
	list_del(&region->list);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	/* Set the group back-pointer before the group update, so that the
	 * caller can destroy the region cleanly should the update fail.
	 */
	region->group = group;
	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}

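/* Look up the region that a chunk of the given priority and element usage
 * should live in. Returns the matching region and sets *p_need_split when
 * the priority fits the region but its key does not cover the requested
 * elements; returns NULL when a brand new region has to be created.
 */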
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check whether the requested priority rather belongs
		 * under one of the following regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than that of the currently inspected
		 * region, we cannot use this region, so return NULL to
		 * indicate a new region has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than that of the currently inspected
		 * region, we cannot use this region either. There is still
		 * some hope that the next region might fit, so let it be
		 * processed and eventually break at the check right above
		 * this one.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate whether the region needs to be split in order to
		 * add the requested priority. A split is needed when the
		 * requested element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* A new region has to be created. */
}

static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	/* No pattern covers the requested elements, use the usage as-is. */
	memcpy(out, elusage, sizeof(*out));
}

#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16

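/* Create the region in hardware via the PTAR register, with a base entry
 * count and one key ID per key block encoding. The tcam_region_info that
 * comes back in the response is unpacked and then used as the handle in
 * every later register access touching this region.
 */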
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region,
				u16 new_size)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
			    new_size, region->id, region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset,
				      struct mlxsw_sp_acl_rule_info *rulei)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	char *mask;
	char *key;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here; the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)

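/* Every region ends with a catch-all entry at the weakest possible priority
 * (~0U). The entry's only action is "continue", presumably to give lookups
 * that match no user rule a well-defined fall-through behavior.
 */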
static int
mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	parman_prio_init(region->parman, parman_prio,
			 MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
	err = parman_item_add(region->parman, parman_prio, parman_item);
	if (err)
		goto err_parman_item_add;

	rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rulei)) {
		err = PTR_ERR(rulei);
		goto err_rulei_create;
	}

	mlxsw_sp_acl_rulei_act_continue(rulei);
	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    parman_item->index, rulei);
	if (err)
		goto err_rule_insert;
	region->catchall.rulei = rulei;

	return 0;

err_rule_insert:
err_rulei_commit:
	mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
	parman_item_remove(region->parman, parman_prio, parman_item);
err_parman_item_add:
	parman_prio_fini(parman_prio);
	return err;
}

static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      parman_item->index);
	mlxsw_sp_acl_rulei_destroy(rulei);
	parman_item_remove(region->parman, parman_prio, parman_item);
	parman_prio_fini(parman_prio);
}

static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region,
			      u16 src_offset, u16 dst_offset, u16 size)
{
	char prcr_pl[MLXSW_REG_PRCR_LEN];

	mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
			    region->tcam_region_info, src_offset,
			    region->tcam_region_info, dst_offset, size);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}

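/* Callbacks for parman, the kernel's linear priority array manager
 * (lib/parman.c), which maintains the region's entry layout: when it runs
 * out of slots it asks us to grow the region in steps of
 * MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP entries, and when it reorders items
 * it asks us to move entry ranges in hardware via PRCR. The LSORT algorithm
 * keeps the items sorted by priority in a linear array.
 */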
static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
						  unsigned long new_count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}

static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
						 unsigned long from_index,
						 unsigned long to_index,
						 unsigned long count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

	mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
				      from_index, to_index, count);
}

static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_acl_tcam_region_parman_resize,
	.move		= mlxsw_sp_acl_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
				       region);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_catchall_add;

	return region;

err_tcam_region_catchall_add:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	parman_destroy(region->parman);
err_parman_create:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	parman_destroy(region->parman);
	kfree(region);
}

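/* Associate a chunk with a region: either hook it up to an existing region
 * whose key covers the chunk's element usage, or create a fresh region for
 * it (sized after the group's patterns) and attach that region to the group.
 * The case where an existing region would have to be split is rejected.
 */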
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to its priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that the region does not contain. We would need to split
		 * the existing region into two and create a new region for
		 * this chunk in between. This is not supported for now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}

static void
mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	list_del(&chunk->list);
	if (list_empty(&region->chunk_list)) {
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	}
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
	if (chunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		chunk->ref_count++;
		return chunk;
	}
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
					      priority, elusage);
}

static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	if (--chunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}

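/* Adding an entry: look up (or create) the chunk for the rule's priority,
 * let parman pick a free slot within the chunk's priority band, and write
 * the encoded key, mask and first action set to that slot via PTCE2.
 */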
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;
	err = parman_item_add(region->parman, &chunk->parman_prio,
			      &entry->parman_item);
	if (err)
		goto err_parman_item_add;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    entry->parman_item.index,
						    rulei);
	if (err)
		goto err_rule_insert;
	entry->chunk = chunk;

	return 0;

err_rule_insert:
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
err_parman_item_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}

static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      entry->parman_item.index);
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}

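/* Key element patterns used to size new regions. The IPv4 pattern covers
 * L2 fields plus the IPv4 5-tuple, the IPv6 one covers IPv6 addressing
 * (split into HI/LO halves) plus L4 ports. A region keyed after one of
 * these can absorb any rule whose element usage is a subset of the pattern.
 */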
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC,
	MLXSW_AFK_ELEMENT_SMAC,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP4,
	MLXSW_AFK_ELEMENT_DST_IP4,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_IP6_HI,
	MLXSW_AFK_ELEMENT_DST_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

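/* Glue for the flower classifier offload. A flower ruleset is simply one
 * TCAM group and a flower rule is one TCAM entry; the thin wrappers below
 * adapt the generic profile_ops callbacks to the group/entry helpers above.
 */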
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     void *priv, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam *tcam = priv;

	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
					    dev, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
					   &rule->entry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}

const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
	.priv_size		= sizeof(struct mlxsw_sp_acl_tcam),
	.init			= mlxsw_sp_acl_tcam_init,
	.fini			= mlxsw_sp_acl_tcam_fini,
	.profile_ops		= mlxsw_sp_acl_tcam_profile_ops,
};