// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "en/fs_tt_redirect.h"
#include "fs_core.h"
#include "mlx5_core.h"

enum fs_udp_type {
	FS_IPV4_UDP,
	FS_IPV6_UDP,
	FS_UDP_NUM_TYPES,
};

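/* Per traffic-type UDP redirect tables (IPv4/IPv6), their catch-all default
 * rules and a reference count so that multiple callers can share the tables.
 */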
struct mlx5e_fs_udp {
	struct mlx5e_flow_table tables[FS_UDP_NUM_TYPES];
	struct mlx5_flow_handle *default_rules[FS_UDP_NUM_TYPES];
	int ref_cnt;
};

struct mlx5e_fs_any {
	struct mlx5e_flow_table table;
	struct mlx5_flow_handle *default_rule;
	int ref_cnt;
};

static char *fs_udp_type2str(enum fs_udp_type i)
{
	switch (i) {
	case FS_IPV4_UDP:
		return "UDP v4";
	default: /* FS_IPV6_UDP */
		return "UDP v6";
	}
}

static enum mlx5_traffic_types fs_udp2tt(enum fs_udp_type i)
{
	switch (i) {
	case FS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	default: /* FS_IPV6_UDP */
		return MLX5_TT_IPV6_UDP;
	}
}

static enum fs_udp_type tt2fs_udp(enum mlx5_traffic_types i)
{
	switch (i) {
	case MLX5_TT_IPV4_UDP:
		return FS_IPV4_UDP;
	case MLX5_TT_IPV6_UDP:
		return FS_IPV6_UDP;
	default:
		return FS_UDP_NUM_TYPES;
	}
}

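/* Release a redirect rule previously returned by
 * mlx5e_fs_tt_redirect_udp_add_rule() or mlx5e_fs_tt_redirect_any_add_rule().
 */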
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

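/* Build an outer-headers match on IP version, ip_protocol == UDP and the
 * given UDP destination port.
 */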
static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type type,
				  u16 udp_dport)
{
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
		 type == FS_IPV4_UDP ? 4 : 6);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, udp_dport);
}

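/* Steer UDP packets of the given TTC traffic type (MLX5_TT_IPV4_UDP or
 * MLX5_TT_IPV6_UDP) with destination port @d_port to TIR @tir_num.
 * Returns the rule handle; callers release it with
 * mlx5e_fs_tt_redirect_del_rule().
 *
 * Illustrative sketch only; 'fs' and 'tirn' are assumed to come from the
 * caller and 319 is just an example port:
 *
 *	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
 *						 tirn, 319);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 */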
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
				  enum mlx5_traffic_types ttc_type,
				  u32 tir_num, u16 d_port)
{
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	enum fs_udp_type type = tt2fs_udp(ttc_type);
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft = NULL;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	if (type == FS_UDP_NUM_TYPES)
		return ERR_PTR(-EINVAL);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ft = fs_udp->tables[type].t;

	fs_udp_set_dport_flow(spec, type, d_port);
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add %s rule failed, err %d\n",
		       __func__, fs_udp_type2str(type), err);
	}
	return rule;
}

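/* Catch-all rule: UDP traffic that matches no dport rule is forwarded to the
 * original TTC default destination for this traffic type.
 */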
static int fs_udp_add_default_rule(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	struct mlx5e_flow_table *fs_udp_t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err;

	fs_udp_t = &fs_udp->tables[type];

	dest = mlx5_ttc_get_default_dest(ttc, fs_udp2tt(type));
	rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add default rule failed, fs type=%d, err %d\n",
		       __func__, type, err);
		return err;
	}

	fs_udp->default_rules[type] = rule;
	return 0;
}

#define MLX5E_FS_UDP_NUM_GROUPS	(2)
#define MLX5E_FS_UDP_GROUP1_SIZE	(BIT(16))
#define MLX5E_FS_UDP_GROUP2_SIZE	(BIT(0))
#define MLX5E_FS_UDP_TABLE_SIZE		(MLX5E_FS_UDP_GROUP1_SIZE +\
					 MLX5E_FS_UDP_GROUP2_SIZE)
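/* Two flow groups: group 1 (BIT(16) entries) matches on IP version, UDP
 * protocol and UDP destination port; group 2 (one entry) holds the catch-all
 * default rule.
 */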
static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);

	switch (type) {
	case FS_IPV4_UDP:
	case FS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	/* Match on UDP protocol, IPv4/6 and dport */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_UDP_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_UDP_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

static int fs_udp_create_table(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft;
	int err;

	ft = &fs_udp->tables[type];
	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
	ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs %s table id %u level %u\n",
		      fs_udp_type2str(type), ft->t->id, ft->t->level);

	err = fs_udp_create_groups(ft, type);
	if (err)
		goto err;

	err = fs_udp_add_default_rule(fs, type);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
{
	if (IS_ERR_OR_NULL(fs_udp->tables[i].t))
		return;

	mlx5_del_flow_rules(fs_udp->default_rules[i]);
	mlx5e_destroy_flow_table(&fs_udp->tables[i]);
	fs_udp->tables[i].t = NULL;
}

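/* fs_udp_enable()/fs_udp_disable() switch the TTC UDP traffic types between
 * the redirect tables and their original destinations.
 */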
static int fs_udp_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
		/* Modify ttc rules destination to point back to the indir TIRs */
		err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, fs_udp2tt(i), err);
			return err;
		}
	}

	return 0;
}

static int fs_udp_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
		dest.ft = udp->tables[i].t;

		/* Modify ttc rules destination to point to the fs_udp flow tables */
		err = mlx5_ttc_fwd_dest(ttc, fs_udp2tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
			       __func__, fs_udp2tt(i), err);
			return err;
		}
	}
	return 0;
}

void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	int i;

	if (!fs_udp)
		return;

	if (--fs_udp->ref_cnt)
		return;

	fs_udp_disable(fs);

	for (i = 0; i < FS_UDP_NUM_TYPES; i++)
		fs_udp_destroy_table(fs_udp, i);

	kfree(fs_udp);
	mlx5e_fs_set_udp(fs, NULL);
}

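/* Create (or take another reference on) the UDP redirect tables and point the
 * TTC UDP traffic types at them. Balanced by mlx5e_fs_tt_redirect_udp_destroy().
 */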
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
	int i, err;

	if (udp) {
		udp->ref_cnt++;
		return 0;
	}

	udp = kzalloc(sizeof(*udp), GFP_KERNEL);
	if (!udp)
		return -ENOMEM;
	mlx5e_fs_set_udp(fs, udp);

	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
		err = fs_udp_create_table(fs, i);
		if (err)
			goto err_destroy_tables;
	}

	err = fs_udp_enable(fs);
	if (err)
		goto err_destroy_tables;

	udp->ref_cnt = 1;

	return 0;

err_destroy_tables:
	while (--i >= 0)
		fs_udp_destroy_table(udp, i);

	kfree(udp);
	mlx5e_fs_set_udp(fs, NULL);
	return err;
}

static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_type)
{
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ether_type);
}

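/* Steer packets with the given outer ethertype to TIR @tir_num. Returns the
 * rule handle; callers release it with mlx5e_fs_tt_redirect_del_rule().
 */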
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
				  u32 tir_num, u16 ether_type)
{
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft = NULL;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ft = fs_any->table.t;

	fs_any_set_ethertype_flow(spec, ether_type);
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add ANY rule failed, err %d\n",
		       __func__, err);
	}
	return rule;
}

static int fs_any_add_default_rule(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	struct mlx5e_flow_table *fs_any_t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err;

	fs_any_t = &fs_any->table;
	dest = mlx5_ttc_get_default_dest(ttc, MLX5_TT_ANY);
	rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add default rule failed, fs type=ANY, err %d\n",
		       __func__, err);
		return err;
	}

	fs_any->default_rule = rule;
	return 0;
}

#define MLX5E_FS_ANY_NUM_GROUPS	(2)
#define MLX5E_FS_ANY_GROUP1_SIZE	(BIT(16))
#define MLX5E_FS_ANY_GROUP2_SIZE	(BIT(0))
#define MLX5E_FS_ANY_TABLE_SIZE		(MLX5E_FS_ANY_GROUP1_SIZE +\
					 MLX5E_FS_ANY_GROUP2_SIZE)

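/* Same layout as the UDP tables: group 1 (BIT(16) entries) matches on the
 * ethertype, group 2 (one entry) holds the catch-all default rule.
 */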
static int fs_any_create_groups(struct mlx5e_flow_table *ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_FS_ANY_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	/* Match on ethertype */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_ANY_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_ANY_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static int fs_any_create_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	struct mlx5e_flow_table *ft = &fs_any->table;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_FS_ANY_TABLE_SIZE;
	ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs ANY table id %u level %u\n",
		      ft->t->id, ft->t->level);

	err = fs_any_create_groups(ft);
	if (err)
		goto err;

	err = fs_any_add_default_rule(fs);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static int fs_any_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err;

	/* Modify ttc rules destination to point back to the indir TIRs */
	err = mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_ANY);
	if (err) {
		fs_err(fs,
		       "%s: modify ttc[%d] default destination failed, err(%d)\n",
		       __func__, MLX5_TT_ANY, err);
		return err;
	}
	return 0;
}

static int fs_any_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_any *any = mlx5e_fs_get_any(fs);
	struct mlx5_flow_destination dest = {};
	int err;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = any->table.t;

	/* Modify ttc rules destination to point to the fs_any flow table */
	err = mlx5_ttc_fwd_dest(ttc, MLX5_TT_ANY, &dest);
	if (err) {
		fs_err(fs,
		       "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
		       __func__, MLX5_TT_ANY, err);
		return err;
	}
	return 0;
}

static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
{
	if (IS_ERR_OR_NULL(fs_any->table.t))
		return;

	mlx5_del_flow_rules(fs_any->default_rule);
	mlx5e_destroy_flow_table(&fs_any->table);
	fs_any->table.t = NULL;
}

void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);

	if (!fs_any)
		return;

	if (--fs_any->ref_cnt)
		return;

	fs_any_disable(fs);

	fs_any_destroy_table(fs_any);

	kfree(fs_any);
	mlx5e_fs_set_any(fs, NULL);
}

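/* Create (or take another reference on) the ANY redirect table and point the
 * TTC ANY traffic type at it. Balanced by mlx5e_fs_tt_redirect_any_destroy().
 */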
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	int err;

	if (fs_any) {
		fs_any->ref_cnt++;
		return 0;
	}

	fs_any = kzalloc(sizeof(*fs_any), GFP_KERNEL);
	if (!fs_any)
		return -ENOMEM;
	mlx5e_fs_set_any(fs, fs_any);

	err = fs_any_create_table(fs);
	if (err)
		goto err_free_any;

	err = fs_any_enable(fs);
	if (err)
		goto err_destroy_table;

	fs_any->ref_cnt = 1;

	return 0;

err_destroy_table:
	fs_any_destroy_table(fs_any);
err_free_any:
	kfree(fs_any);
	mlx5e_fs_set_any(fs, NULL);
	return err;
}