// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>

#include "spectrum_mr_tcam.h"
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"

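/* Multicast router TCAM state. priv holds the ASIC-specific TCAM data,
 * allocated in mlxsw_sp_mr_tcam_init() according to
 * mlxsw_sp->mr_tcam_ops->priv_size.
 */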
struct mlxsw_sp_mr_tcam {
	void *priv;
};

/* This struct maps to one RIGR2 register entry */
struct mlxsw_sp_mr_erif_sublist {
	struct list_head list;
	u32 rigr2_kvdl_index;
	int num_erifs;
	u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
	bool synced;
};

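/* A chain of eRIF sublists (RIGR2 entries). kvdl_index caches the KVDL index
 * of the first sublist, which is what the flexible action block points at.
 */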
struct mlxsw_sp_mr_tcam_erif_list {
	struct list_head erif_sublists;
	u32 kvdl_index;
};

static bool
mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MC_ERIF_LIST_ENTRIES);

	return erif_sublist->num_erifs == erif_list_entries;
}

static void
mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	INIT_LIST_HEAD(&erif_list->erif_sublists);
}

static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	int err;

	erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
	if (!erif_sublist)
		return ERR_PTR(-ENOMEM);
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
				  1, &erif_sublist->rigr2_kvdl_index);
	if (err) {
		kfree(erif_sublist);
		return ERR_PTR(err);
	}

	list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
	return erif_sublist;
}

static void
mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
	list_del(&erif_sublist->list);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
			   1, erif_sublist->rigr2_kvdl_index);
	kfree(erif_sublist);
}

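/* Append an eRIF to the list. The eRIF goes into the last sublist; if the
 * list is empty or the last sublist is full, a new sublist (a new RIGR2
 * entry) is allocated first. Touched sublists are marked as not synced so
 * that a following commit re-writes them to the hardware.
 */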
static int
mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			  u16 erif_index)
{
	struct mlxsw_sp_mr_erif_sublist *sublist;

	/* If there is no sublist yet, or the last one is full, allocate a
	 * new one.
	 */
	if (list_empty(&erif_list->erif_sublists)) {
		sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
		if (IS_ERR(sublist))
			return PTR_ERR(sublist);
		erif_list->kvdl_index = sublist->rigr2_kvdl_index;
	} else {
		sublist = list_last_entry(&erif_list->erif_sublists,
					  struct mlxsw_sp_mr_erif_sublist,
					  list);
		sublist->synced = false;
		if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
			sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
								  erif_list);
			if (IS_ERR(sublist))
				return PTR_ERR(sublist);
		}
	}

	/* Add the eRIF at the last sublist's next free slot */
	sublist->erif_indices[sublist->num_erifs++] = erif_index;
	return 0;
}

static void
mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;

	list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
				 list)
		mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
}

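/* Write all the sublists that are not yet synced to the hardware. Each
 * sublist is packed into a RIGR2 register entry; every entry but the last is
 * chained to the following sublist by its KVDL index.
 */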
static int
mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_sp_mr_erif_sublist *curr_sublist;
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	int err;
	int i;

	list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
		if (curr_sublist->synced)
			continue;

		/* Chain this sublist to the next one, unless it is the last */
		if (list_is_last(&curr_sublist->list,
				 &erif_list->erif_sublists)) {
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     false, 0);
		} else {
			struct mlxsw_sp_mr_erif_sublist *next_sublist;

			next_sublist = list_next_entry(curr_sublist, list);
			mlxsw_reg_rigr2_pack(rigr2_pl,
					     curr_sublist->rigr2_kvdl_index,
					     true,
					     next_sublist->rigr2_kvdl_index);
		}

		/* Pack all the erifs */
		for (i = 0; i < curr_sublist->num_erifs; i++) {
			u16 erif_index = curr_sublist->erif_indices[i];

			mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
							erif_index);
		}

		/* Write the entry */
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
				      rigr2_pl);
		if (err)
			/* No rollback is needed here because nothing should
			 * point to this hardware entry yet.
			 */
			return err;
		curr_sublist->synced = true;
	}
	return 0;
}

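/* Transfer the sublists (and the head KVDL index) from one erif_list to
 * another. Used when replacing a route's eRIF list with a newly built one.
 */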
static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
				       struct mlxsw_sp_mr_tcam_erif_list *from)
{
	list_splice(&from->erif_sublists, &to->erif_sublists);
	to->kvdl_index = from->kvdl_index;
}

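/* Per-route state: the route key, its eRIF list, the flexible action block
 * implementing the route action, the flow counter bound to the route and the
 * ASIC-specific TCAM entry data in priv (ops->route_priv_size bytes).
 */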
struct mlxsw_sp_mr_tcam_route {
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	u32 counter_index;
	enum mlxsw_sp_mr_route_action action;
	struct mlxsw_sp_mr_route_key key;
	u16 irif_index;
	u16 min_mtu;
	void *priv;
};

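/* Build and commit a flexible action block for a route. The block always
 * binds the pre-allocated flow counter first. It then appends either a trap
 * action (MLXSW_SP_MR_ROUTE_ACTION_TRAP) or a multicast router action that
 * forwards according to the committed erif_list (FORWARD and
 * TRAP_AND_FORWARD), with TRAP_AND_FORWARD also appending a trap-and-forward
 * action.
 */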
static struct mlxsw_afa_block *
mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_sp_mr_route_action route_action,
				  u16 irif_index, u32 counter_index,
				  u16 min_mtu,
				  struct mlxsw_sp_mr_tcam_erif_list *erif_list)
{
	struct mlxsw_afa_block *afa_block;
	int err;

	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (IS_ERR(afa_block))
		return afa_block;

	err = mlxsw_afa_block_append_allocated_counter(afa_block,
						       counter_index);
	if (err)
		goto err;

	switch (route_action) {
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
		err = mlxsw_afa_block_append_trap(afa_block,
						  MLXSW_TRAP_ID_ACL1);
		if (err)
			goto err;
		break;
	case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
	case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
		/* If we are about to append a multicast router action, commit
		 * the erif_list.
		 */
		err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
		if (err)
			goto err;

		err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
						      min_mtu, false,
						      erif_list->kvdl_index);
		if (err)
			goto err;

		if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
			err = mlxsw_afa_block_append_trap_and_forward(afa_block,
								      MLXSW_TRAP_ID_ACL2);
			if (err)
				goto err;
		}
		break;
	default:
		err = -EINVAL;
		goto err;
	}

	err = mlxsw_afa_block_commit(afa_block);
	if (err)
		goto err;
	return afa_block;
err:
	mlxsw_afa_block_destroy(afa_block);
	return ERR_PTR(err);
}

static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}

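/* Fill an erif_list with all the eRIFs of a route. Only the driver's list is
 * built here; it is committed to the hardware when the flexible action block
 * referencing it is created.
 */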
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_mr_tcam_erif_list *erif_list,
			       struct mlxsw_sp_mr_route_info *route_info)
{
	int err;
	int i;

	for (i = 0; i < route_info->erif_num; i++) {
		u16 erif_index = route_info->erif_indices[i];

		err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
						erif_index);
		if (err)
			return err;
	}
	return 0;
}

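/* Create a route: build the eRIF list, allocate a flow counter, create the
 * flexible action block and finally write the route to the TCAM through the
 * ASIC-specific ops.
 */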
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
			      void *route_priv,
			      struct mlxsw_sp_mr_route_params *route_params)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	route->key = route_params->key;
	route->irif_index = route_params->value.irif_index;
	route->min_mtu = route_params->value.min_mtu;
	route->action = route_params->value.route_action;

	/* Create the egress RIFs list */
	mlxsw_sp_mr_erif_list_init(&route->erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
					     &route_params->value);
	if (err)
		goto err_erif_populate;

	/* Create the flow counter */
	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
	if (err)
		goto err_counter_alloc;

	/* Create the flexible action block */
	route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
							     route->action,
							     route->irif_index,
							     route->counter_index,
							     route->min_mtu,
							     &route->erif_list);
	if (IS_ERR(route->afa_block)) {
		err = PTR_ERR(route->afa_block);
		goto err_afa_block_create;
	}

	route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
	if (!route->priv) {
		err = -ENOMEM;
		goto err_route_priv_alloc;
	}

	/* Write the route to the TCAM */
	err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
				&route->key, route->afa_block,
				route_params->prio);
	if (err)
		goto err_route_create;
	return 0;

err_route_create:
	kfree(route->priv);
err_route_priv_alloc:
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
err_counter_alloc:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	return err;
}

static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
					   void *priv, void *route_priv)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
	kfree(route->priv);
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
}

static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
					void *route_priv, u64 *packets,
					u64 *bytes)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
					 packets, bytes);
}

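/* Change the route action by replacing the flexible action block: the new
 * block is created and the TCAM entry is updated to point at it before the
 * old block is destroyed.
 */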
static int
mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
				     void *route_priv,
				     enum mlxsw_sp_mr_route_action route_action)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->action = route_action;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
						 void *route_priv, u16 min_mtu)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new flexible action block */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route->action,
						      route->irif_index,
						      route->counter_index,
						      min_mtu,
						      &route->erif_list);
	if (IS_ERR(afa_block))
		return PTR_ERR(afa_block);

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err;

	/* Delete the old one */
	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	route->afa_block = afa_block;
	route->min_mtu = min_mtu;
	return 0;
err:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
	return err;
}

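/* The iRIF can only be updated in place for trap routes; for forwarding
 * routes the iRIF is encoded in the multicast router action, so updating it
 * requires rebuilding the action block.
 */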
static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
					      void *route_priv, u16 irif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;

	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return -EINVAL;
	route->irif_index = irif_index;
	return 0;
}

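/* Add an eRIF to a route. The eRIF is appended to the route's erif_list; for
 * forwarding routes the list is also committed to the hardware, while for
 * trap routes the hardware list is not referenced and the commit is skipped.
 */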
static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	int err;

	err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
					erif_index);
	if (err)
		return err;

	/* Commit the erif_list only if the route action is not TRAP */
	if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
						    &route->erif_list);
	return 0;
}

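/* Remove an eRIF from a route. The erif_list is not edited in place; instead
 * a new list is built without the deleted eRIF, a new action block is
 * created around it and the TCAM entry is re-pointed before the old block
 * and old list are released.
 */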
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
					   void *route_priv, u16 erif_index)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_erif_sublist *erif_sublist;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;
	int i;

	/* Create a copy of the original erif_list without the deleted entry */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists,
			    list) {
		for (i = 0; i < erif_sublist->num_erifs; i++) {
			u16 curr_erif = erif_sublist->erif_indices[i];

			if (curr_erif == erif_index)
				continue;
			err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
							curr_erif);
			if (err)
				goto err_erif_list_add;
		}
	}

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
						      route->irif_index,
						      route->counter_index,
						      route->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_list_add:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

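/* Replace all of a route's attributes (action, iRIF, minimum MTU and eRIF
 * list) at once, using the same sequence as the other update helpers: build
 * the new erif_list and action block, update the TCAM entry and only then
 * tear down the old state.
 */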
static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam_route *route = route_priv;
	struct mlxsw_sp_mr_tcam_erif_list erif_list;
	struct mlxsw_afa_block *afa_block;
	int err;

	/* Create a new erif_list */
	mlxsw_sp_mr_erif_list_init(&erif_list);
	err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
	if (err)
		goto err_erif_populate;

	/* Create the flexible action block pointing to the new erif_list */
	afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
						      route_info->route_action,
						      route_info->irif_index,
						      route->counter_index,
						      route_info->min_mtu,
						      &erif_list);
	if (IS_ERR(afa_block)) {
		err = PTR_ERR(afa_block);
		goto err_afa_block_create;
	}

	/* Update the TCAM route entry */
	err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
	if (err)
		goto err_route_write;

	mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
	route->afa_block = afa_block;
	mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
	route->action = route_info->route_action;
	route->irif_index = route_info->irif_index;
	route->min_mtu = route_info->min_mtu;
	return 0;

err_route_write:
	mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
err_afa_block_create:
err_erif_populate:
	mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
	return err;
}

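/* Initialize the multicast router TCAM: make sure the device exposes the
 * MC_ERIF_LIST_ENTRIES resource (the number of eRIF records per RIGR2 entry)
 * and allocate and initialize the ASIC-specific TCAM state.
 */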
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
		return -EIO;

	mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
	if (!mr_tcam->priv)
		return -ENOMEM;

	err = ops->init(mlxsw_sp, mr_tcam->priv);
	if (err)
		goto err_init;
	return 0;

err_init:
	kfree(mr_tcam->priv);
	return err;
}

static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
	struct mlxsw_sp_mr_tcam *mr_tcam = priv;

	ops->fini(mr_tcam->priv);
	kfree(mr_tcam->priv);
}

const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_mr_tcam),
	.route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
	.init = mlxsw_sp_mr_tcam_init,
	.route_create = mlxsw_sp_mr_tcam_route_create,
	.route_update = mlxsw_sp_mr_tcam_route_update,
	.route_stats = mlxsw_sp_mr_tcam_route_stats,
	.route_action_update = mlxsw_sp_mr_tcam_route_action_update,
	.route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
	.route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
	.route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
	.route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
	.route_destroy = mlxsw_sp_mr_tcam_route_destroy,
	.fini = mlxsw_sp_mr_tcam_fini,
};