1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
3  * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the names of the copyright holders nor the names of its
15  *    contributors may be used to endorse or promote products derived from
16  *    this software without specific prior written permission.
17  *
18  * Alternatively, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2 as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/bitops.h>
39 #include <linux/list.h>
40 #include <linux/rhashtable.h>
41 #include <linux/netdevice.h>
42 
43 #include "reg.h"
44 #include "core.h"
45 #include "resources.h"
46 #include "spectrum.h"
47 #include "spectrum_acl_tcam.h"
48 #include "core_acl_flex_keys.h"
49 
50 size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
51 {
52 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
53 
54 	return ops->priv_size;
55 }
56 
/* Initialize the TCAM: size the region and group ID allocators from the
 * device resources and let the flavor-specific ops initialize its private
 * part. Returns 0 on success or a negative errno; on failure everything
 * allocated here is unwound.
 */
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	/* Bitmap tracking which region IDs are in use. */
	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	/* Bitmap tracking which group IDs are in use. */
	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						 ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
104 
/* Tear down the TCAM in reverse order of mlxsw_sp_acl_tcam_init():
 * flavor-specific fini first, then free the ID allocator bitmaps.
 */
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->fini(mlxsw_sp, tcam->priv);
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}
114 
/* Translate a rule's priority to the value programmed into HW. When
 * @fillup_priority is false, 0 is used. Otherwise the priority is mirrored
 * around the maximum (taken from the KVD_SIZE resource), since HW orders
 * priorities the opposite way from TC. Returns -EIO if the resource is
 * unavailable and -EINVAL for out-of-range priorities.
 */
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* KVD size serves as the upper bound for rule priorities. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
	if (rulei->priority > max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}
137 
138 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
139 					   u16 *p_id)
140 {
141 	u16 id;
142 
143 	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
144 	if (id < tcam->max_regions) {
145 		__set_bit(id, tcam->used_regions);
146 		*p_id = id;
147 		return 0;
148 	}
149 	return -ENOBUFS;
150 }
151 
/* Return a region ID to the allocator bitmap. */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
157 
158 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
159 					  u16 *p_id)
160 {
161 	u16 id;
162 
163 	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
164 	if (id < tcam->max_groups) {
165 		__set_bit(id, tcam->used_groups);
166 		*p_id = id;
167 		return 0;
168 	}
169 	return -ENOBUFS;
170 }
171 
/* Return a group ID to the allocator bitmap. */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
177 
/* A pattern is a predefined set of flexible-key elements. When a new
 * region is created, its key is taken from the first pattern that covers
 * the requested element usage.
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
182 
/* A group is an ordered set of TCAM regions sharing one binding point in
 * the device; rule chunks within the group are looked up by priority.
 */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id; /* Device group ID */
	struct list_head region_list; /* Regions, ordered by priority */
	unsigned int region_count;
	struct rhashtable chunk_ht; /* Chunks, keyed by priority */
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set; /* true when a template fixes key elements */
	struct mlxsw_afk_element_usage tmplt_elusage;
};
195 
/* A chunk gathers all entries of one priority within a region; its
 * lifetime is reference-counted by the entries using it.
 */
struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count; /* Number of entries using this chunk */
	unsigned long priv[0];
	/* priv has to be always the last item */
};
206 
/* A single rule entry; lives in a chunk and carries flavor-specific
 * private data.
 */
struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[0];
	/* priv has to be always the last item */
};
212 
/* Chunk hashtable parameters: the key is the chunk's priority
 * (an unsigned int embedded in the chunk itself).
 */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};
219 
/* Sync the group's current ordered region list to the device by rewriting
 * the group's PAGT register entry with the region IDs in list order.
 */
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
233 
/* Initialize a group: record the patterns (and optional key template that
 * overrides them), reserve a group ID and set up the priority->chunk
 * hashtable. The device is not touched here; that happens when regions
 * are attached. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	if (tmplt_elusage) {
		group->tmplt_elusage_set = true;
		memcpy(&group->tmplt_elusage, tmplt_elusage,
		       sizeof(group->tmplt_elusage));
	}
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}
268 
/* Destroy a group and release its ID. All regions must have been detached
 * by now; warn if the region list is not empty.
 */
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}
278 
/* Bind the group to a port via the PPBT register; @ingress selects the
 * ingress or egress ACL binding point.
 */
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
293 
/* Unbind the group from a port (reverse of mlxsw_sp_acl_tcam_group_bind).
 * The register write status is deliberately ignored: this runs on the
 * teardown path where nothing useful can be done about a failure.
 */
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
308 
/* Return the device group ID backing this group. */
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}
314 
315 static unsigned int
316 mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
317 {
318 	struct mlxsw_sp_acl_tcam_chunk *chunk;
319 
320 	if (list_empty(&region->chunk_list))
321 		return 0;
322 	/* As a priority of a region, return priority of the first chunk */
323 	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
324 	return chunk->priority;
325 }
326 
327 static unsigned int
328 mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
329 {
330 	struct mlxsw_sp_acl_tcam_chunk *chunk;
331 
332 	if (list_empty(&region->chunk_list))
333 		return 0;
334 	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
335 	return chunk->priority;
336 }
337 
338 static void
339 mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
340 				 struct mlxsw_sp_acl_tcam_region *region)
341 {
342 	struct mlxsw_sp_acl_tcam_region *region2;
343 	struct list_head *pos;
344 
345 	/* Position the region inside the list according to priority */
346 	list_for_each(pos, &group->region_list) {
347 		region2 = list_entry(pos, typeof(*region2), list);
348 		if (mlxsw_sp_acl_tcam_region_prio(region2) >
349 		    mlxsw_sp_acl_tcam_region_prio(region))
350 			break;
351 	}
352 	list_add_tail(&region->list, pos);
353 	group->region_count++;
354 }
355 
/* Remove a region from the group's list. The device is not updated here;
 * callers follow up with mlxsw_sp_acl_tcam_group_update().
 */
static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	group->region_count--;
	list_del(&region->list);
}
363 
364 static int
365 mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
366 				      struct mlxsw_sp_acl_tcam_group *group,
367 				      struct mlxsw_sp_acl_tcam_region *region)
368 {
369 	int err;
370 
371 	if (group->region_count == group->tcam->max_group_size)
372 		return -ENOBUFS;
373 
374 	mlxsw_sp_acl_tcam_group_list_add(group, region);
375 
376 	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
377 	if (err)
378 		goto err_group_update;
379 	region->group = group;
380 
381 	return 0;
382 
383 err_group_update:
384 	mlxsw_sp_acl_tcam_group_list_del(group, region);
385 	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
386 	return err;
387 }
388 
/* Detach a region from its group and sync the shortened region list to
 * the device.
 */
static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}
398 
/* Find an existing region in the group that can host a chunk with the
 * given priority and element usage. On a match, *p_need_split says
 * whether the region's key does not cover @elusage (i.e. a region split
 * would be required to use it). Returns NULL when a new region has to be
 * created instead.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}
451 
/* Resolve the element usage a new region should be created with: the
 * group's template if one was set, otherwise the first predefined pattern
 * that is a superset of @elusage, falling back to @elusage itself when no
 * pattern covers it.
 */
static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (group->tmplt_elusage_set) {
		memcpy(out, &group->tmplt_elusage, sizeof(*out));
		/* A template that does not cover the request is a bug in
		 * the caller; warn but continue with the template.
		 */
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
478 
/* Allocate the region in HW via the PTAR register, programming the key
 * block encodings chosen for this region. On success the region info
 * returned by the device is unpacked into region->tcam_region_info for
 * use by subsequent register calls.
 */
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}
506 
/* Free the HW region previously allocated by
 * mlxsw_sp_acl_tcam_region_alloc(). The write status is ignored on this
 * teardown path.
 */
static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
518 
/* Enable the region for lookups via the PACL register. */
static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
529 
/* Disable region lookups via the PACL register. The write status is
 * ignored on this teardown path.
 */
static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
540 
/* Create a region: derive a key info from @elusage, reserve a region ID,
 * allocate and enable the region in HW and initialize the flavor-specific
 * private part. Returns the new region or an ERR_PTR(); every step is
 * unwound in reverse order on failure.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	/* Flavor-specific private data trails the region struct. */
	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, region);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	kfree(region);
	return ERR_PTR(err);
}
599 
/* Destroy a region: flavor-specific fini, disable and free the region in
 * HW, then release its ID and key info (reverse of region_create).
 *
 * NOTE(review): region->group is dereferenced here, but it is only
 * assigned on successful group attach — confirm callers never destroy a
 * region whose attach failed.
 */
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	kfree(region);
}
613 
/* Associate a chunk with a region inside the group: either an existing
 * region that fits the chunk's priority and element usage, or a newly
 * created region (with its usage widened by the group template/patterns)
 * which is then attached to the group.
 */
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	/* An existing region is already attached to the group. */
	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	/* NOTE(review): mlxsw_sp_acl_tcam_region_destroy() dereferences
	 * region->group; verify it is set by the time attach fails on
	 * this path.
	 */
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}
665 
/* Disassociate a chunk from its region; when the last chunk leaves, the
 * region is detached from its group and destroyed.
 */
static void
mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	list_del(&chunk->list);
	if (list_empty(&region->chunk_list)) {
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	}
}
678 
/* Create a chunk for @priority, associate it with a region (existing or
 * newly created) and insert it into the group's priority hashtable.
 * The catch-all priority is reserved and rejected with -EINVAL.
 * Returns the new chunk (ref_count 1) or an ERR_PTR().
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	/* Flavor-specific private data trails the chunk struct. */
	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	ops->chunk_init(chunk->region->priv, chunk->priv, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}
720 
/* Destroy a chunk: remove it from the group hashtable, run the flavor
 * fini and disassociate it from its region (reverse of chunk_create).
 */
static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}
734 
/* Get a reference on the chunk with the given priority, creating it if it
 * does not exist yet. An existing chunk whose region key does not cover
 * @elusage is unexpected; warn and fail with -EINVAL in that case.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
	if (chunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		chunk->ref_count++;
		return chunk;
	}
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
					      priority, elusage);
}
755 
756 static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
757 					struct mlxsw_sp_acl_tcam_chunk *chunk)
758 {
759 	if (--chunk->ref_count)
760 		return;
761 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
762 }
763 
764 static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
765 {
766 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
767 
768 	return ops->entry_priv_size;
769 }
770 
/* Install a rule entry: take (or create) the chunk matching the rule's
 * priority, then let the flavor-specific ops program the entry into the
 * chunk's region. The chunk reference is dropped on failure.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;

	err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
			     entry->priv, rulei);
	if (err)
		goto err_entry_add;
	entry->chunk = chunk;

	return 0;

err_entry_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}
800 
/* Remove a rule entry from HW and drop its reference on the chunk
 * (reverse of mlxsw_sp_acl_tcam_entry_add()).
 */
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
811 
812 static int
813 mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
814 				     struct mlxsw_sp_acl_tcam_entry *entry,
815 				     bool *activity)
816 {
817 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
818 	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
819 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
820 
821 	return ops->entry_activity_get(mlxsw_sp, region->priv,
822 				       entry->priv, activity);
823 }
824 
/* Key elements of the IPv4/Ethernet pattern; a region created from this
 * pattern can match on any subset of these fields (see
 * mlxsw_sp_acl_tcam_group_use_patterns()).
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};
844 
/* Key elements of the IPv6 pattern (full 128-bit addresses split into
 * 32-bit element slices).
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
859 
/* Patterns are tried in this order; the first one whose element set
 * covers the requested usage is used for a new region.
 */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

/* Number of entries in mlxsw_sp_acl_tcam_patterns. */
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
873 
/* Flower ruleset: a thin wrapper around a single TCAM group. */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};
877 
/* Flower rule: a thin wrapper around a single TCAM entry. */
struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
881 
882 static int
883 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
884 				     struct mlxsw_sp_acl_tcam *tcam,
885 				     void *ruleset_priv,
886 				     struct mlxsw_afk_element_usage *tmplt_elusage)
887 {
888 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
889 
890 	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
891 					   mlxsw_sp_acl_tcam_patterns,
892 					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
893 					   tmplt_elusage);
894 }
895 
/* Destroy the TCAM group backing a flower ruleset. */
static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}
904 
905 static int
906 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
907 				      void *ruleset_priv,
908 				      struct mlxsw_sp_port *mlxsw_sp_port,
909 				      bool ingress)
910 {
911 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
912 
913 	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
914 					    mlxsw_sp_port, ingress);
915 }
916 
917 static void
918 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
919 					void *ruleset_priv,
920 					struct mlxsw_sp_port *mlxsw_sp_port,
921 					bool ingress)
922 {
923 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
924 
925 	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
926 				       mlxsw_sp_port, ingress);
927 }
928 
/* Return the device group ID of the ruleset's backing group. */
static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
936 
937 static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
938 {
939 	return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
940 	       mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
941 }
942 
943 static int
944 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
945 				  void *ruleset_priv, void *rule_priv,
946 				  struct mlxsw_sp_acl_rule_info *rulei)
947 {
948 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
949 	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
950 
951 	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
952 					   &rule->entry, rulei);
953 }
954 
/* Remove a flower rule's TCAM entry from HW. */
static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}
962 
963 static int
964 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
965 					   void *rule_priv, bool *activity)
966 {
967 	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
968 
969 	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
970 						    activity);
971 }
972 
/* Profile callbacks for flower-based rulesets; the priv pointers passed
 * to these ops are the flower ruleset/rule wrappers sized here.
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= mlxsw_sp_acl_tcam_flower_rule_priv_size,
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
985 
/* Per-profile callback sets, indexed by enum mlxsw_sp_acl_profile;
 * unimplemented profiles are left NULL.
 */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
990 
/* Look up the callback set for @profile. Returns NULL (with a warning)
 * for out-of-range or unimplemented profiles.
 */
const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}
1004