1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/slab.h>
6 #include <linux/errno.h>
7 #include <linux/bitops.h>
8 #include <linux/list.h>
9 #include <linux/rhashtable.h>
10 #include <linux/netdevice.h>
11 
12 #include "reg.h"
13 #include "core.h"
14 #include "resources.h"
15 #include "spectrum.h"
16 #include "spectrum_acl_tcam.h"
17 #include "core_acl_flex_keys.h"
18 
19 size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
20 {
21 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
22 
23 	return ops->priv_size;
24 }
25 
26 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
27 			   struct mlxsw_sp_acl_tcam *tcam)
28 {
29 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
30 	u64 max_tcam_regions;
31 	u64 max_regions;
32 	u64 max_groups;
33 	size_t alloc_size;
34 	int err;
35 
36 	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
37 					      ACL_MAX_TCAM_REGIONS);
38 	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
39 
40 	/* Use 1:1 mapping between ACL region and TCAM region */
41 	if (max_tcam_regions < max_regions)
42 		max_regions = max_tcam_regions;
43 
44 	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
45 	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
46 	if (!tcam->used_regions)
47 		return -ENOMEM;
48 	tcam->max_regions = max_regions;
49 
50 	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
51 	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
52 	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
53 	if (!tcam->used_groups) {
54 		err = -ENOMEM;
55 		goto err_alloc_used_groups;
56 	}
57 	tcam->max_groups = max_groups;
58 	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
59 						 ACL_MAX_GROUP_SIZE);
60 
61 	err = ops->init(mlxsw_sp, tcam->priv, tcam);
62 	if (err)
63 		goto err_tcam_init;
64 
65 	return 0;
66 
67 err_tcam_init:
68 	kfree(tcam->used_groups);
69 err_alloc_used_groups:
70 	kfree(tcam->used_regions);
71 	return err;
72 }
73 
74 void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
75 			    struct mlxsw_sp_acl_tcam *tcam)
76 {
77 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
78 
79 	ops->fini(mlxsw_sp, tcam->priv);
80 	kfree(tcam->used_groups);
81 	kfree(tcam->used_regions);
82 }
83 
84 int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
85 				   struct mlxsw_sp_acl_rule_info *rulei,
86 				   u32 *priority, bool fillup_priority)
87 {
88 	u64 max_priority;
89 
90 	if (!fillup_priority) {
91 		*priority = 0;
92 		return 0;
93 	}
94 
95 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
96 		return -EIO;
97 
98 	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
99 	if (rulei->priority > max_priority)
100 		return -EINVAL;
101 
102 	/* Unlike in TC, in HW, higher number means higher priority. */
103 	*priority = max_priority - rulei->priority;
104 	return 0;
105 }
106 
107 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
108 					   u16 *p_id)
109 {
110 	u16 id;
111 
112 	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
113 	if (id < tcam->max_regions) {
114 		__set_bit(id, tcam->used_regions);
115 		*p_id = id;
116 		return 0;
117 	}
118 	return -ENOBUFS;
119 }
120 
/* Return a region ID to the free pool. */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
126 
127 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
128 					  u16 *p_id)
129 {
130 	u16 id;
131 
132 	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
133 	if (id < tcam->max_groups) {
134 		__set_bit(id, tcam->used_groups);
135 		*p_id = id;
136 		return 0;
137 	}
138 	return -ENOBUFS;
139 }
140 
/* Return a group ID to the free pool. */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
146 
/* A predefined set of flexible-key elements used when sizing the key
 * of a newly created region.
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
151 
/* An ACL group: an ordered set of regions that is bound to ports as one
 * unit. Regions are kept sorted by priority in region_list; chunk_ht
 * maps a rule priority to its chunk.
 */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id; /* Group ID programmed into HW (PAGT/PPBT) */
	struct list_head region_list; /* Regions, sorted by priority */
	unsigned int region_count;
	struct rhashtable chunk_ht; /* priority -> chunk */
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set; /* true when a template pins element usage */
	struct mlxsw_afk_element_usage tmplt_elusage;
};
164 
165 struct mlxsw_sp_acl_tcam_chunk {
166 	struct list_head list; /* Member of a TCAM region */
167 	struct rhash_head ht_node; /* Member of a chunk HT */
168 	unsigned int priority; /* Priority within the region and group */
169 	struct mlxsw_sp_acl_tcam_group *group;
170 	struct mlxsw_sp_acl_tcam_region *region;
171 	unsigned int ref_count;
172 	unsigned long priv[0];
173 	/* priv has to be always the last item */
174 };
175 
/* One TCAM entry; it belongs to a chunk which defines its priority. */
struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	/* C99 flexible array member (instead of the deprecated
	 * zero-length array) for the flavor-specific data.
	 */
	unsigned long priv[];
	/* priv has to be always the last item */
};
181 
/* Hashtable of a group's chunks, keyed by chunk priority. */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};
188 
189 static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
190 					  struct mlxsw_sp_acl_tcam_group *group)
191 {
192 	struct mlxsw_sp_acl_tcam_region *region;
193 	char pagt_pl[MLXSW_REG_PAGT_LEN];
194 	int acl_index = 0;
195 
196 	mlxsw_reg_pagt_pack(pagt_pl, group->id);
197 	list_for_each_entry(region, &group->region_list, list)
198 		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
199 	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
200 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
201 }
202 
/* Initialize an ACL group: record the pattern set and the optional
 * element-usage template, allocate a group ID and init the
 * priority->chunk hashtable. Fully undone on failure.
 */
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	if (tmplt_elusage) {
		/* The template fixes the element usage used for all
		 * regions created within this group.
		 */
		group->tmplt_elusage_set = true;
		memcpy(&group->tmplt_elusage, tmplt_elusage,
		       sizeof(group->tmplt_elusage));
	}
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}
237 
238 static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
239 					struct mlxsw_sp_acl_tcam_group *group)
240 {
241 	struct mlxsw_sp_acl_tcam *tcam = group->tcam;
242 
243 	rhashtable_destroy(&group->chunk_ht);
244 	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
245 	WARN_ON(!list_empty(&group->region_list));
246 }
247 
/* Bind the group to a port's ingress or egress ACL lookup point via
 * the PPBT register.
 */
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
262 
/* Unbind the group from a port's ingress or egress ACL lookup point.
 * The write status is deliberately not checked - there is nothing the
 * void teardown path could do about a failure.
 */
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
277 
/* Return the HW ID of the group. */
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}
283 
284 static unsigned int
285 mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
286 {
287 	struct mlxsw_sp_acl_tcam_chunk *chunk;
288 
289 	if (list_empty(&region->chunk_list))
290 		return 0;
291 	/* As a priority of a region, return priority of the first chunk */
292 	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
293 	return chunk->priority;
294 }
295 
296 static unsigned int
297 mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
298 {
299 	struct mlxsw_sp_acl_tcam_chunk *chunk;
300 
301 	if (list_empty(&region->chunk_list))
302 		return 0;
303 	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
304 	return chunk->priority;
305 }
306 
/* Insert a region into the group's list, keeping the list sorted by
 * region priority, and bump the cached region count.
 */
static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;

	/* Position the region inside the list according to priority */
	list_for_each(pos, &group->region_list) {
		region2 = list_entry(pos, typeof(*region2), list);
		if (mlxsw_sp_acl_tcam_region_prio(region2) >
		    mlxsw_sp_acl_tcam_region_prio(region))
			break;
	}
	/* pos is either the first higher-priority region or the list
	 * head itself; adding before it preserves ordering.
	 */
	list_add_tail(&region->list, pos);
	group->region_count++;
}
324 
325 static void
326 mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
327 				 struct mlxsw_sp_acl_tcam_region *region)
328 {
329 	group->region_count--;
330 	list_del(&region->list);
331 }
332 
/* Attach a region to a group: insert it into the priority-ordered list
 * and push the new list to the device. On device failure the list
 * change is rolled back and the previous list re-pushed.
 */
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	/* The ACL_MAX_GROUP_SIZE resource caps regions per group. */
	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}
357 
358 static void
359 mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
360 				      struct mlxsw_sp_acl_tcam_region *region)
361 {
362 	struct mlxsw_sp_acl_tcam_group *group = region->group;
363 
364 	mlxsw_sp_acl_tcam_group_list_del(group, region);
365 	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
366 }
367 
/* Find an existing region in the group that can host a chunk with the
 * given priority and element usage. Returns NULL when a new region has
 * to be created. On a non-NULL return, *p_need_split tells whether the
 * found region would have to be split to honor priority ordering.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}
420 
421 static void
422 mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
423 				     struct mlxsw_afk_element_usage *elusage,
424 				     struct mlxsw_afk_element_usage *out)
425 {
426 	const struct mlxsw_sp_acl_tcam_pattern *pattern;
427 	int i;
428 
429 	/* In case the template is set, we don't have to look up the pattern
430 	 * and just use the template.
431 	 */
432 	if (group->tmplt_elusage_set) {
433 		memcpy(out, &group->tmplt_elusage, sizeof(*out));
434 		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
435 		return;
436 	}
437 
438 	for (i = 0; i < group->patterns_count; i++) {
439 		pattern = &group->patterns[i];
440 		mlxsw_afk_element_usage_fill(out, pattern->elements,
441 					     pattern->elements_count);
442 		if (mlxsw_afk_element_usage_subset(elusage, out))
443 			return;
444 	}
445 	memcpy(out, elusage, sizeof(*out));
446 }
447 
448 static int
449 mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
450 			       struct mlxsw_sp_acl_tcam_region *region)
451 {
452 	struct mlxsw_afk_key_info *key_info = region->key_info;
453 	char ptar_pl[MLXSW_REG_PTAR_LEN];
454 	unsigned int encodings_count;
455 	int i;
456 	int err;
457 
458 	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
459 			    region->key_type,
460 			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
461 			    region->id, region->tcam_region_info);
462 	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
463 	for (i = 0; i < encodings_count; i++) {
464 		u16 encoding;
465 
466 		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
467 		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
468 	}
469 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
470 	if (err)
471 		return err;
472 	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
473 	return 0;
474 }
475 
/* Free the device TCAM region previously allocated by
 * mlxsw_sp_acl_tcam_region_alloc(); write status not checked on this
 * void teardown path.
 */
static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
487 
488 static int
489 mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
490 				struct mlxsw_sp_acl_tcam_region *region)
491 {
492 	char pacl_pl[MLXSW_REG_PACL_LEN];
493 
494 	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
495 			    region->tcam_region_info);
496 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
497 }
498 
499 static void
500 mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
501 				 struct mlxsw_sp_acl_tcam_region *region)
502 {
503 	char pacl_pl[MLXSW_REG_PACL_LEN];
504 
505 	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
506 			    region->tcam_region_info);
507 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
508 }
509 
/* Create a region: allocate the SW representation (including the
 * flavor-specific private area), derive key info from the element
 * usage, allocate a region ID, associate it with the flavor, allocate
 * and enable the region in the device and run the flavor init.
 * Errors unwind the steps in exact reverse order.
 * Returns the region or ERR_PTR on failure.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	kfree(region);
	return ERR_PTR(err);
}
568 
/* Destroy a region, mirroring mlxsw_sp_acl_tcam_region_create() in
 * reverse order: flavor fini, disable, free in device, release the
 * region ID and the key info, free the SW representation.
 */
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	kfree(region);
}
582 
/* Bind a chunk to a region: reuse an existing region of the group when
 * its key covers the needed elements and the priority fits; otherwise
 * create a new region sized by the group's patterns and attach it.
 * Splitting an existing region is not implemented (-EOPNOTSUPP).
 */
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}
634 
635 static void
636 mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
637 				struct mlxsw_sp_acl_tcam_chunk *chunk)
638 {
639 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
640 
641 	list_del(&chunk->list);
642 	if (list_empty(&region->chunk_list)) {
643 		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
644 		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
645 	}
646 }
647 
/* Allocate and initialize a chunk for the given priority, associate it
 * with a region (existing or newly created) and insert it into the
 * group's chunk hashtable. The catch-all priority value is rejected
 * with -EINVAL. Returns the chunk or ERR_PTR on failure.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	/* Caller's reference. */
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	ops->chunk_init(chunk->region->priv, chunk->priv, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	ops->chunk_fini(chunk->priv);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}
689 
690 static void
691 mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
692 				struct mlxsw_sp_acl_tcam_chunk *chunk)
693 {
694 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
695 	struct mlxsw_sp_acl_tcam_group *group = chunk->group;
696 
697 	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
698 			       mlxsw_sp_acl_tcam_chunk_ht_params);
699 	ops->chunk_fini(chunk->priv);
700 	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
701 	kfree(chunk);
702 }
703 
704 static struct mlxsw_sp_acl_tcam_chunk *
705 mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
706 			    struct mlxsw_sp_acl_tcam_group *group,
707 			    unsigned int priority,
708 			    struct mlxsw_afk_element_usage *elusage)
709 {
710 	struct mlxsw_sp_acl_tcam_chunk *chunk;
711 
712 	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
713 				       mlxsw_sp_acl_tcam_chunk_ht_params);
714 	if (chunk) {
715 		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
716 						       elusage)))
717 			return ERR_PTR(-EINVAL);
718 		chunk->ref_count++;
719 		return chunk;
720 	}
721 	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
722 					      priority, elusage);
723 }
724 
725 static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
726 					struct mlxsw_sp_acl_tcam_chunk *chunk)
727 {
728 	if (--chunk->ref_count)
729 		return;
730 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
731 }
732 
733 static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
734 {
735 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
736 
737 	return ops->entry_priv_size;
738 }
739 
/* Insert a rule entry: take (or create) the chunk matching the rule's
 * priority and element usage, then let the flavor program the entry
 * into the chunk's region. The chunk reference is dropped on failure.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;

	err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
			     entry->priv, rulei);
	if (err)
		goto err_entry_add;
	entry->chunk = chunk;

	return 0;

err_entry_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}
769 
770 static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
771 					struct mlxsw_sp_acl_tcam_entry *entry)
772 {
773 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
774 	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
775 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
776 
777 	ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
778 	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
779 }
780 
781 static int
782 mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
783 				     struct mlxsw_sp_acl_tcam_entry *entry,
784 				     bool *activity)
785 {
786 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
787 	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
788 	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
789 
790 	return ops->entry_activity_get(mlxsw_sp, region->priv,
791 				       entry->priv, activity);
792 }
793 
/* Flexible-key elements offered by regions created from the IPv4
 * pattern (L2 + IPv4 5-tuple and related header fields).
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};
813 
/* Flexible-key elements offered by regions created from the IPv6
 * pattern (full 128-bit addresses split into 32-bit elements).
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
828 
/* Patterns tried in order when sizing a new region; see
 * mlxsw_sp_acl_tcam_group_use_patterns().
 */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
842 
/* Flower profile glue: a ruleset is backed by one TCAM group and a
 * rule by one TCAM entry.
 */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
850 
/* Flower profile: create the backing TCAM group, using the predefined
 * IPv4/IPv6 patterns and an optional template element usage.
 */
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage);
}
864 
/* Flower profile: destroy the backing TCAM group. */
static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}
873 
/* Flower profile: bind the backing group to a port direction. */
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
					    mlxsw_sp_port, ingress);
}
885 
/* Flower profile: unbind the backing group from a port direction. */
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
				       mlxsw_sp_port, ingress);
}
897 
/* Flower profile: expose the backing group's HW ID. */
static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
905 
906 static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
907 {
908 	return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
909 	       mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
910 }
911 
/* Flower profile: add a rule as a TCAM entry in the ruleset's group. */
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
					   &rule->entry, rulei);
}
923 
/* Flower profile: remove a rule's TCAM entry. */
static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}
931 
/* Flower profile: query activity of a rule's TCAM entry. */
static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
						    activity);
}
941 
/* Flower profile callbacks - thin adapters over the generic
 * group/entry code above.
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= mlxsw_sp_acl_tcam_flower_rule_priv_size,
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
954 
/* Profile -> ops mapping, indexed by enum mlxsw_sp_acl_profile;
 * unimplemented profiles are left NULL.
 */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
959 
960 const struct mlxsw_sp_acl_profile_ops *
961 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
962 			      enum mlxsw_sp_acl_profile profile)
963 {
964 	const struct mlxsw_sp_acl_profile_ops *ops;
965 
966 	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
967 		return NULL;
968 	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
969 	if (WARN_ON(!ops))
970 		return NULL;
971 	return ops;
972 }
973