/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "spectrum_mr_tcam.h"
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"

struct mlxsw_sp_mr_tcam_region {
	struct mlxsw_sp *mlxsw_sp;
	enum mlxsw_reg_rtar_key_type rtar_key_type;
	struct parman *parman;
	struct parman_prio *parman_prios;
};

struct mlxsw_sp_mr_tcam {
	struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
};

/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
	struct list_head list;
	u32 rigr2_kvdl_index;
	int num_erifs;
	u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
	bool synced;
};

struct mlxsw_sp_mr_tcam_erif_list {
	struct list_head erif_sublists;
	u32 kvdl_index;
};
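
/* An erif_list is thus a chain of sublists, each backed by one RIGR2
 * entry in the KVD linear area. A RIGR2 entry can point at a next
 * entry, so a list longer than MC_ERIF_LIST_ENTRIES spills over into
 * further sublists:
 *
 *	erif_list -> sublist0 -> sublist1 -> ... (the chain ends at the
 *	first entry packed without a valid next pointer)
 *
 * kvdl_index caches the first sublist's KVDL index, which is what the
 * forwarding action ends up pointing at (see
 * mlxsw_sp_mr_tcam_afa_block_create() below).
 */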

static bool
mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MC_ERIF_LIST_ENTRIES);

	return erif_sublist->num_erifs == erif_list_entries;
}

static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	INIT_LIST_HEAD(&erif_list->erif_sublists);
}

#define MLXSW_SP_KVDL_RIGR2_SIZE 1

static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	int err;

	erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
	if (!erif_sublist)
		return ERR_PTR(-ENOMEM);
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
				  &erif_sublist->rigr2_kvdl_index);
	if (err) {
		kfree(erif_sublist);
		return ERR_PTR(err);
	}

	list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
	return erif_sublist;
}

static void
mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	list_del(&erif_sublist->list);
	mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
	kfree(erif_sublist);
}

static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			  u16 erif_index)
{
	struct mlxsw_sp_mr_erif_sublist *sublist;

	/* If the list is empty or the last sublist is full, allocate a new
	 * sublist.
	 */
	if (list_empty(&erif_list->erif_sublists)) {
		sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
		if (IS_ERR(sublist))
			return PTR_ERR(sublist);
		erif_list->kvdl_index = sublist->rigr2_kvdl_index;
	} else {
		sublist = list_last_entry(&erif_list->erif_sublists,
					  struct mlxsw_sp_mr_erif_sublist,
					  list);
		sublist->synced = false;
		if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
			sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
								  erif_list);
			if (IS_ERR(sublist))
				return PTR_ERR(sublist);
		}
	}

	/* Append the eRIF at the next free index of the last sublist */
	sublist->erif_indices[sublist->num_erifs++] = erif_index;
	return 0;
}
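
/* A minimal usage sketch, mirroring mlxsw_sp_mr_tcam_erif_populate()
 * below (num_erifs and erif_indices[] stand in for caller state):
 *
 *	mlxsw_sp_mr_erif_list_init(&erif_list);
 *	for (i = 0; i < num_erifs; i++) {
 *		err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
 *						erif_indices[i]);
 *		if (err)
 *			goto err_list_add;
 *	}
 *
 * On error, the half-built list must be released with
 * mlxsw_sp_mr_erif_list_flush(); nothing reaches the hardware until
 * mlxsw_sp_mr_erif_list_commit() is called.
 */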

static void
mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;

	list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
				 list)
		mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
}

static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *curr_sublist;
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	int err;
	int i;

	list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
		if (curr_sublist->synced)
			continue;

		/* Only a non-last sublist points at the next one; pack the
		 * next sublist's KVDL index in that case.
		 */
		if (list_is_last(&curr_sublist->list,
				 &erif_list->erif_sublists)) {
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     false, 0);
		} else {
			struct mlxsw_sp_mr_erif_sublist *next_sublist;

			next_sublist = list_next_entry(curr_sublist, list);
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     true,
					     next_sublist->rigr2_kvdl_index);
		}

		/* Pack all the erifs */
		for (i = 0; i < curr_sublist->num_erifs; i++) {
			u16 erif_index = curr_sublist->erif_indices[i];

			mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
							erif_index);
		}

		/* Write the entry */
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
				      rigr2_pl);
		if (err)
			/* No need for a rollback here, as this hardware
			 * entry should not be pointed to yet.
			 */
			return err;
		curr_sublist->synced = true;
	}
	return 0;
}
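
/* For illustration: if MC_ERIF_LIST_ENTRIES were 4 (the real value is a
 * device resource queried at runtime) and six eRIFs had been added, the
 * commit would write two chained RIGR2 entries:
 *
 *	sublist0: eRIFs 0..3, next_valid = true, next = sublist1
 *	sublist1: eRIFs 4..5, next_valid = false
 *
 * Sublists already marked synced are skipped, so appending eRIFs only
 * rewrites the tail of the chain that actually changed.
 */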

static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
				       struct mlxsw_sp_mr_tcam_erif_list *from)
{
	list_splice(&from->erif_sublists, &to->erif_sublists);
	to->kvdl_index = from->kvdl_index;
}

struct mlxsw_sp_mr_tcam_route {
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	u32 counter_index;
	struct parman_item parman_item;
	struct parman_prio *parman_prio;
	enum mlxsw_sp_mr_route_action action;
	struct mlxsw_sp_mr_route_key key;
	u16 irif_index;
	u16 min_mtu;
};

static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_sp_mr_route_action route_action,
				  u16 irif_index, u32 counter_index,
				  u16 min_mtu,
				  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_afa_block *afa_block;
	int err;

	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (!afa_block)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_afa_block_append_counter(afa_block, counter_index);
	if (err)
		goto err;

	switch (route_action) {
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
		err = mlxsw_afa_block_append_trap(afa_block,
						  MLXSW_TRAP_ID_ACL1);
		if (err)
			goto err;
		break;
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
	case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
		/* If we are about to append a multicast router action, commit
		 * the erif_list.
		 */
		err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
		if (err)
			goto err;

		err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
						      min_mtu, false,
						      erif_list->kvdl_index);
		if (err)
			goto err;

		if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
			err = mlxsw_afa_block_append_trap_and_forward(afa_block,
								      MLXSW_TRAP_ID_ACL2);
			if (err)
				goto err;
		}
		break;
	default:
		err = -EINVAL;
		goto err;
	}

	err = mlxsw_afa_block_commit(afa_block);
	if (err)
		goto err;
	return afa_block;
err:
	mlxsw_afa_block_destroy(afa_block);
	return ERR_PTR(err);
}
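
/* To summarize the resulting action chain per route action:
 *
 *	TRAP:			counter, trap(ACL1)
 *	FORWARD:		counter, mcrouter(irif, min_mtu, erif_list)
 *	TRAP_AND_FORWARD:	counter, mcrouter(...), trap_and_forward(ACL2)
 *
 * The erif_list is committed before the mcrouter action is appended, so
 * the action never points at an unwritten RIGR2 chain.
 */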

static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}

static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
					  struct parman_item *parman_item,
					  struct mlxsw_sp_mr_route_key *key,
					  struct mlxsw_afa_block *afa_block)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	switch (key->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
					  key->vrid,
					  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
					  ntohl(key->group.addr4),
					  ntohl(key->group_mask.addr4),
					  ntohl(key->source.addr4),
					  ntohl(key->source_mask.addr4),
					  mlxsw_afa_block_first_set(afa_block));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
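
/* Note that the key is packed with MLXSW_REG_RMFT2_IRIF_MASK_IGNORE:
 * the TCAM matches on VRID, group and source only, while the route's
 * ingress RIF is handed to the mcrouter flexible action instead (see
 * mlxsw_sp_mr_tcam_afa_block_create() above).
 */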

static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
					 struct parman_item *parman_item)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
				  0, 0, 0, 0, 0, 0, NULL);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}

static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			       struct mlxsw_sp_mr_route_info *route_info)
{
	int err;
	int i;

	for (i = 0; i < route_info->erif_num; i++) {
		u16 erif_index = route_info->erif_indices[i];

		err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
						erif_index);
		if (err)
			return err;
	}
	return 0;
}

static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
				       struct mlxsw_sp_mr_tcam_route *route,
				       enum mlxsw_sp_mr_route_prio prio)
{
	struct parman_prio *parman_prio = NULL;
	int err;

	switch (route->key.proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
		err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
				      parman_prio, &route->parman_item);
		if (err)
			return err;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}
	route->parman_prio = parman_prio;
	return 0;
}

static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
					  struct mlxsw_sp_mr_tcam_route *route)
{
	switch (route->key.proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
				   route->parman_prio, &route->parman_item);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON_ONCE(1);
	}
}

static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_params *route_params)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	route->key = route_params->key;
	route->irif_index = route_params->value.irif_index;
	route->min_mtu = route_params->value.min_mtu;
	route->action = route_params->value.route_action;

	/* Create the egress RIFs list */
	mlxsw_sp_mr_erif_list_init(&route->erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
					     &route_params->value);
	if (err)
		goto err_erif_populate;

	/* Create the flow counter */
	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
	if (err)
		goto err_counter_alloc;

	/* Create the flexible action block */
	route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
							     route->action,
							     route->irif_index,
							     route->counter_index,
							     route->min_mtu,
							     &route->erif_list);
	if (IS_ERR(route->afa_block)) {
		err = PTR_ERR(route->afa_block);
		goto err_afa_block_create;
	}

	/* Allocate place in the TCAM */
	err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
						     route_params->prio);
	if (err)
		goto err_parman_item_add;

	/* Write the route to the TCAM */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, route->afa_block);
	if (err)
		goto err_route_replace;
	return 0;

err_route_replace:
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
err_parman_item_add:
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_counter_alloc:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	return err;
}

static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
					   void *priv, void *route_priv)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
				      &route->parman_item);
	mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}

static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
					void *route_priv, u64 *packets,
					u64 *bytes)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
					 packets, bytes);
}

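/* The update helpers below follow a make-before-break pattern: build a
 * new flexible action block (and, where needed, a new erif_list),
 * repoint the TCAM entry at it with a single RMFT2 write, and only then
 * free the old resources. The route keeps forwarding with its old state
 * until the replace takes effect.
 */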
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
				     void *route_priv,
				     enum mlxsw_sp_mr_route_action route_action)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->action = route_action;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
						 void *route_priv, u16 min_mtu)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route->action,
						      route->irif_index,
						      route->counter_index,
						      min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->min_mtu = min_mtu;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
					      void *route_priv, u16 irif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return -EINVAL;
	route->irif_index = irif_index;
	return 0;
}

static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	int err;

	err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
					erif_index);
	if (err)
		return err;

	/* Commit the erif_list only if the route action is not TRAP, as a
	 * TRAP route does not reference it in hardware.
	 */
	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
						    &route->erif_list);
	return 0;
}

static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;
	int i;

	/* Create a copy of the original erif_list without the deleted entry */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists,
			    list) {
		for (i = 0; i < erif_sublist->num_erifs; i++) {
			u16 curr_erif = erif_sublist->erif_indices[i];

			if (curr_erif == erif_index)
				continue;
			err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
							curr_erif);
			if (err)
				goto err_erif_list_add;
		}
	}

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new erif_list */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
	if (err)
		goto err_erif_populate;

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route_info->route_action,
						      route_info->irif_index,
						      route->counter_index,
						      route_info->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
					     &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	route->action = route_info->route_action;
	route->irif_index = route_info->irif_index;
	route->min_mtu = route_info->min_mtu;
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16

static int
mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
			    mr_tcam_region->rtar_key_type,
			    MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void
mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
			    mr_tcam_region->rtar_key_type, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
						 unsigned long new_count)
{
	struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rtar_pl[MLXSW_REG_RTAR_LEN];
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
			    mr_tcam_region->rtar_key_type, new_count);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
}

static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
						unsigned long from_index,
						unsigned long to_index,
						unsigned long count)
{
	struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
	struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
	char rrcr_pl[MLXSW_REG_RRCR_LEN];

	mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
			    from_index, count,
			    mr_tcam_region->rtar_key_type, to_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
}

static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_mr_tcam_region_parman_resize,
	.move		= mlxsw_sp_mr_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};
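
/* The region is managed by parman (lib/parman.c): it is allocated with
 * MLXSW_SP_MR_TCAM_REGION_BASE_COUNT entries and resized in
 * MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP increments through RTAR, while
 * RRCR moves existing rules within the region to make room for
 * insertions. With PARMAN_ALGO_TYPE_LSORT, items with lower priority
 * values end up at lower indices, which (as far as TCAM lookup goes)
 * makes them take precedence over higher-valued ones.
 */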

static int
mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
			     enum mlxsw_reg_rtar_key_type rtar_key_type)
{
	struct parman_prio *parman_prios;
	struct parman *parman;
	int err;
	int i;

	mr_tcam_region->rtar_key_type = rtar_key_type;
	mr_tcam_region->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
	if (err)
		return err;

	parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
			       mr_tcam_region);
	if (!parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}
	mr_tcam_region->parman = parman;

	parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
				     sizeof(*parman_prios), GFP_KERNEL);
	if (!parman_prios) {
		err = -ENOMEM;
		goto err_parman_prios_alloc;
	}
	mr_tcam_region->parman_prios = parman_prios;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_init(mr_tcam_region->parman,
				 &mr_tcam_region->parman_prios[i], i);
	return 0;

err_parman_prios_alloc:
	parman_destroy(parman);
err_parman_create:
	mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
	return err;
}

static void
mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
{
	int i;

	for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
		parman_prio_fini(&mr_tcam_region->parman_prios[i]);
	kfree(mr_tcam_region->parman_prios);
	parman_destroy(mr_tcam_region->parman);
	mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
}

static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
		return -EIO;

	return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
					    &mr_tcam->ipv4_tcam_region,
					    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
}

static void mlxsw_sp_mr_tcam_fini(void *priv)
{
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
}

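/* Glue to the protocol-independent multicast routing layer
 * (spectrum_mr.c): the common code allocates priv_size and
 * route_priv_size bytes for the private structs defined above and
 * drives the hardware exclusively through these callbacks, receiving
 * this struct via mlxsw_sp_mr_init().
 */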
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
	.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
	.init = mlxsw_sp_mr_tcam_init,
	.route_create = mlxsw_sp_mr_tcam_route_create,
	.route_update = mlxsw_sp_mr_tcam_route_update,
	.route_stats = mlxsw_sp_mr_tcam_route_stats,
	.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
	.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
	.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
	.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
	.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
	.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
	.fini = mlxsw_sp_mr_tcam_fini,
};