xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c (revision a0ae2562c6c4b2721d9fddba63b7286c13517d9f)
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
3  * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
5  * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the names of the copyright holders nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * Alternatively, this software may be distributed under the terms of the
20  * GNU General Public License ("GPL") version 2 as published by the Free
21  * Software Foundation.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/netdevice.h>
39 
40 #include "spectrum_mr_tcam.h"
41 #include "reg.h"
42 #include "spectrum.h"
43 #include "core_acl_flex_actions.h"
44 #include "spectrum_mr.h"
45 
/* Top-level multicast-router TCAM state. The actual layout of @priv is
 * defined by the per-ASIC mr_tcam_ops implementation and is allocated with
 * ops->priv_size bytes in mlxsw_sp_mr_tcam_init().
 */
struct mlxsw_sp_mr_tcam {
	void *priv;
};
49 
/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
	struct list_head list;	/* Member of erif_list->erif_sublists */
	u32 rigr2_kvdl_index;	/* KVDL index backing this RIGR2 entry */
	int num_erifs;		/* Valid entries in erif_indices[] */
	u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
	bool synced;		/* True once written to HW and unchanged since */
};
58 
/* A chain of RIGR2 entries (sublists) forming one egress RIF list.
 * @kvdl_index is the KVDL index of the first sublist - the hardware entry
 * point that the flexible action block points at.
 */
struct mlxsw_sp_mr_tcam_erif_list {
	struct list_head erif_sublists;
	u32 kvdl_index;
};
63 
64 static bool
65 mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
66 			      struct mlxsw_sp_mr_erif_sublist *erif_sublist)
67 {
68 	int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
69 						   MC_ERIF_LIST_ENTRIES);
70 
71 	return erif_sublist->num_erifs == erif_list_entries;
72 }
73 
/* Initialize an empty eRIF list; no hardware or KVDL resources are taken
 * until the first mlxsw_sp_mr_erif_list_add().
 */
static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	INIT_LIST_HEAD(&erif_list->erif_sublists);
}
79 
80 static struct mlxsw_sp_mr_erif_sublist *
81 mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
82 				struct mlxsw_sp_mr_tcam_erif_list *erif_list)
83 {
84 	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
85 	int err;
86 
87 	erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
88 	if (!erif_sublist)
89 		return ERR_PTR(-ENOMEM);
90 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
91 				  1, &erif_sublist->rigr2_kvdl_index);
92 	if (err) {
93 		kfree(erif_sublist);
94 		return ERR_PTR(err);
95 	}
96 
97 	list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
98 	return erif_sublist;
99 }
100 
101 static void
102 mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
103 				 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
104 {
105 	list_del(&erif_sublist->list);
106 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
107 			   1, erif_sublist->rigr2_kvdl_index);
108 	kfree(erif_sublist);
109 }
110 
/* Append an eRIF index to the tail of the eRIF list, allocating a fresh
 * sublist (RIGR2 entry) when the list is empty or its last sublist is full.
 * Returns 0 on success or a negative errno. Changes only take effect in
 * hardware after mlxsw_sp_mr_erif_list_commit().
 */
static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			  u16 erif_index)
{
	struct mlxsw_sp_mr_erif_sublist *sublist;

	/* If either there is no erif_entry or the last one is full, allocate a
	 * new one.
	 */
	if (list_empty(&erif_list->erif_sublists)) {
		sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
		if (IS_ERR(sublist))
			return PTR_ERR(sublist);
		/* First sublist is the hardware entry point of the list. */
		erif_list->kvdl_index = sublist->rigr2_kvdl_index;
	} else {
		sublist = list_last_entry(&erif_list->erif_sublists,
					  struct mlxsw_sp_mr_erif_sublist,
					  list);
		/* The current tail is about to change: either a new eRIF is
		 * appended to it, or - when full - it must be re-written to
		 * chain to the new sublist created below. Either way it needs
		 * a re-commit, so drop its synced flag before the full check.
		 */
		sublist->synced = false;
		if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
			sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
								  erif_list);
			if (IS_ERR(sublist))
				return PTR_ERR(sublist);
		}
	}

	/* Add the eRIF to the last entry's last index */
	sublist->erif_indices[sublist->num_erifs++] = erif_index;
	return 0;
}
143 
144 static void
145 mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
146 			    struct mlxsw_sp_mr_tcam_erif_list *erif_list)
147 {
148 	struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
149 
150 	list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
151 				 list)
152 		mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
153 }
154 
/* Write every not-yet-synced sublist of an eRIF list to hardware via the
 * RIGR2 register, chaining each entry to the next one's KVDL index.
 * Returns 0 on success; on failure, entries already written are left in
 * place - they are not yet pointed to by any route, so no rollback is
 * needed.
 */
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *curr_sublist;
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	int err;
	int i;

	list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
		if (curr_sublist->synced)
			continue;

		/* The last sublist terminates the chain (no next pointer);
		 * any other sublist packs the next sublist's KVDL index.
		 */
		if (list_is_last(&curr_sublist->list,
				 &erif_list->erif_sublists)) {
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     false, 0);
		} else {
			struct mlxsw_sp_mr_erif_sublist *next_sublist;

			next_sublist = list_next_entry(curr_sublist, list);
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     true,
					     next_sublist->rigr2_kvdl_index);
		}

		/* Pack all the erifs */
		for (i = 0; i < curr_sublist->num_erifs; i++) {
			u16 erif_index = curr_sublist->erif_indices[i];

			mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
							erif_index);
		}

		/* Write the entry */
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
				      rigr2_pl);
		if (err)
			/* No need of a rollback here because this
			 * hardware entry should not be pointed yet.
			 */
			return err;
		curr_sublist->synced = true;
	}
	return 0;
}
204 
/* Transfer ownership of all sublists (and the hardware entry point) from
 * @from to @to. @from must be considered empty afterwards; callers use this
 * to swap a freshly-built list into a route.
 */
static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
				       struct mlxsw_sp_mr_tcam_erif_list *from)
{
	list_splice(&from->erif_sublists, &to->erif_sublists);
	to->kvdl_index = from->kvdl_index;
}
211 
/* Per-route TCAM state: the committed eRIF list, the flexible action block
 * the TCAM entry executes, and cached route parameters needed to rebuild
 * the action block on updates.
 */
struct mlxsw_sp_mr_tcam_route {
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	u32 counter_index;	/* Flow counter bound to this route */
	enum mlxsw_sp_mr_route_action action;
	struct mlxsw_sp_mr_route_key key;
	u16 irif_index;		/* Ingress RIF (for RPF-style forwarding) */
	u16 min_mtu;
	void *priv;		/* Per-ASIC route state (ops->route_priv_size) */
};
222 
/* Build and commit a flexible action block for a multicast route: bind the
 * flow counter, then append either a trap action or a multicast-router
 * forward action (optionally trap-and-forward). For forwarding actions the
 * eRIF list is committed to hardware first, since the mcrouter action
 * references its KVDL index. Returns the committed block or an ERR_PTR();
 * on any failure the partially-built block is destroyed.
 */
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_sp_mr_route_action route_action,
				  u16 irif_index, u32 counter_index,
				  u16 min_mtu,
				  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_afa_block *afa_block;
	int err;

	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (!afa_block)
		return ERR_PTR(-ENOMEM);

	/* The counter is pre-allocated by the caller; only bind it here. */
	err = mlxsw_afa_block_append_allocated_counter(afa_block,
						       counter_index);
	if (err)
		goto err;

	switch (route_action) {
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
		err = mlxsw_afa_block_append_trap(afa_block,
						  MLXSW_TRAP_ID_ACL1);
		if (err)
			goto err;
		break;
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
	case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
		/* If we are about to append a multicast router action, commit
		 * the erif_list.
		 */
		err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
		if (err)
			goto err;

		err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
						      min_mtu, false,
						      erif_list->kvdl_index);
		if (err)
			goto err;

		if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
			err = mlxsw_afa_block_append_trap_and_forward(afa_block,
								      MLXSW_TRAP_ID_ACL2);
			if (err)
				goto err;
		}
		break;
	default:
		err = -EINVAL;
		goto err;
	}

	err = mlxsw_afa_block_commit(afa_block);
	if (err)
		goto err;
	return afa_block;
err:
	mlxsw_afa_block_destroy(afa_block);
	return ERR_PTR(err);
}
284 
/* Destroy a flexible action block built by
 * mlxsw_sp_mr_tcam_afa_block_create(). Thin wrapper kept for symmetry with
 * the create helper.
 */
static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}
290 
291 static int
292 mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
293 			       struct mlxsw_sp_mr_tcam_erif_list *erif_list,
294 			       struct mlxsw_sp_mr_route_info *route_info)
295 {
296 	int err;
297 	int i;
298 
299 	for (i = 0; i < route_info->erif_num; i++) {
300 		u16 erif_index = route_info->erif_indices[i];
301 
302 		err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
303 						erif_index);
304 		if (err)
305 			return err;
306 	}
307 	return 0;
308 }
309 
/* Create a multicast route in the TCAM: cache the route parameters, build
 * and commit its eRIF list, allocate a flow counter, build the flexible
 * action block, and finally write the TCAM entry via the per-ASIC ops.
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are unwound in reverse order via the goto labels.
 */
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_params *route_params)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	route->key = route_params->key;
	route->irif_index = route_params->value.irif_index;
	route->min_mtu = route_params->value.min_mtu;
	route->action = route_params->value.route_action;

	/* Create the egress RIFs list */
	mlxsw_sp_mr_erif_list_init(&route->erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
					     &route_params->value);
	if (err)
		goto err_erif_populate;

	/* Create the flow counter */
	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
	if (err)
		goto err_counter_alloc;

	/* Create the flexible action block */
	route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
							     route->action,
							     route->irif_index,
							     route->counter_index,
							     route->min_mtu,
							     &route->erif_list);
	if (IS_ERR(route->afa_block)) {
		err = PTR_ERR(route->afa_block);
		goto err_afa_block_create;
	}

	route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
	if (!route->priv) {
		err = -ENOMEM;
		goto err_route_priv_alloc;
	}

	/* Write the route to the TCAM */
	err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
				&route->key, route->afa_block,
				route_params->prio);
	if (err)
		goto err_route_create;
	return 0;

err_route_create:
	kfree(route->priv);
err_route_priv_alloc:
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_erif_populate:
err_counter_alloc:
	/* Both labels only need the (possibly partial) eRIF list flushed. */
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	return err;
}
374 
/* Destroy a TCAM route, releasing resources in reverse order of creation:
 * TCAM entry first (so hardware stops referencing the action block and
 * eRIF list), then action block, flow counter and eRIF list.
 */
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
					   void *priv, void *route_priv)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
	kfree(route->priv);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}
388 
/* Read the route's packet/byte statistics from its bound flow counter.
 * Returns 0 on success or a negative errno from the counter query.
 */
static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
					void *route_priv, u64 *packets,
					u64 *bytes)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
					 packets, bytes);
}
398 
399 static int
400 mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
401 				     void *route_priv,
402 				     enum mlxsw_sp_mr_route_action route_action)
403 {
404 	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
405 	struct mlxsw_sp_mr_tcam_route *route = route_priv;
406 	struct mlxsw_afa_block *afa_block;
407 	int err;
408 
409 	/* Create a new flexible action block */
410 	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
411 						      route->irif_index,
412 						      route->counter_index,
413 						      route->min_mtu,
414 						      &route->erif_list);
415 	if (IS_ERR(afa_block))
416 		return PTR_ERR(afa_block);
417 
418 	/* Update the TCAM route entry */
419 	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
420 	if (err)
421 		goto err;
422 
423 	/* Delete the old one */
424 	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
425 	route->afa_block = afa_block;
426 	route->action = route_action;
427 	return 0;
428 err:
429 	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
430 	return err;
431 }
432 
433 static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
434 						 void *route_priv, u16 min_mtu)
435 {
436 	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
437 	struct mlxsw_sp_mr_tcam_route *route = route_priv;
438 	struct mlxsw_afa_block *afa_block;
439 	int err;
440 
441 	/* Create a new flexible action block */
442 	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
443 						      route->action,
444 						      route->irif_index,
445 						      route->counter_index,
446 						      min_mtu,
447 						      &route->erif_list);
448 	if (IS_ERR(afa_block))
449 		return PTR_ERR(afa_block);
450 
451 	/* Update the TCAM route entry */
452 	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
453 	if (err)
454 		goto err;
455 
456 	/* Delete the old one */
457 	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
458 	route->afa_block = afa_block;
459 	route->min_mtu = min_mtu;
460 	return 0;
461 err:
462 	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
463 	return err;
464 }
465 
466 static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
467 					      void *route_priv, u16 irif_index)
468 {
469 	struct mlxsw_sp_mr_tcam_route *route = route_priv;
470 
471 	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
472 		return -EINVAL;
473 	route->irif_index = irif_index;
474 	return 0;
475 }
476 
477 static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
478 					   void *route_priv, u16 erif_index)
479 {
480 	struct mlxsw_sp_mr_tcam_route *route = route_priv;
481 	int err;
482 
483 	err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
484 					erif_index);
485 	if (err)
486 		return err;
487 
488 	/* Commit the action only if the route action is not TRAP */
489 	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
490 		return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
491 						    &route->erif_list);
492 	return 0;
493 }
494 
/* Remove an egress RIF from a route. RIGR2 entries cannot be edited in
 * place, so a new eRIF list is built that omits @erif_index, a new action
 * block pointing at it is created and swapped into the TCAM entry, and only
 * then are the old block and old list released. Returns 0 on success or a
 * negative errno; on failure the route keeps its original list and block.
 */
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;
	int i;

	/* Create a copy of the original erif_list without the deleted entry */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
		for (i = 0; i < erif_sublist->num_erifs; i++) {
			u16 curr_erif = erif_sublist->erif_indices[i];

			if (curr_erif == erif_index)
				continue;
			err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
							curr_erif);
			if (err)
				goto err_erif_list_add;
		}
	}

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err_route_write;

	/* Hardware now uses the new block/list; retire and swap in the new. */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}
550 
/* Replace a route's value (action, irif, min MTU and eRIF set) wholesale:
 * build a new eRIF list and action block from @route_info, swap them into
 * the TCAM entry, then release the old block and list and update the cached
 * parameters. Returns 0 on success or a negative errno; on failure the
 * route keeps its original state.
 */
static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new erif_list */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
	if (err)
		goto err_erif_populate;

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route_info->route_action,
						      route_info->irif_index,
						      route->counter_index,
						      route_info->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err_route_write;

	/* Hardware now uses the new block/list; retire the old state. */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	route->action = route_info->route_action;
	route->irif_index = route_info->irif_index;
	route->min_mtu = route_info->min_mtu;
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}
600 
601 static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
602 {
603 	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
604 	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
605 	int err;
606 
607 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
608 		return -EIO;
609 
610 	mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
611 	if (!mr_tcam->priv)
612 		return -ENOMEM;
613 
614 	err = ops->init(mlxsw_sp, mr_tcam->priv);
615 	if (err)
616 		goto err_init;
617 	return 0;
618 
619 err_init:
620 	kfree(mr_tcam->priv);
621 	return err;
622 }
623 
/* Tear down the multicast-router TCAM: run the per-ASIC fini hook, then
 * free the private state allocated in mlxsw_sp_mr_tcam_init().
 */
static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	ops->fini(mr_tcam->priv);
	kfree(mr_tcam->priv);
}
632 
/* TCAM-backed implementation of the generic multicast-routing ops,
 * registered by the spectrum_mr layer (see spectrum_mr.h).
 */
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
	.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
	.init = mlxsw_sp_mr_tcam_init,
	.route_create = mlxsw_sp_mr_tcam_route_create,
	.route_update = mlxsw_sp_mr_tcam_route_update,
	.route_stats = mlxsw_sp_mr_tcam_route_stats,
	.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
	.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
	.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
	.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
	.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
	.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
	.fini = mlxsw_sp_mr_tcam_fini,
};
648