// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "en/fs_tt_redirect.h"
#include "fs_core.h"
#include "mlx5_core.h"

enum fs_udp_type {
	FS_IPV4_UDP,
	FS_IPV6_UDP,
	FS_UDP_NUM_TYPES,
};

struct mlx5e_fs_udp {
	struct mlx5e_flow_table tables[FS_UDP_NUM_TYPES];
	struct mlx5_flow_handle *default_rules[FS_UDP_NUM_TYPES];
	int ref_cnt;
};

struct mlx5e_fs_any {
	struct mlx5e_flow_table table;
	struct mlx5_flow_handle *default_rule;
	int ref_cnt;
};

static char *fs_udp_type2str(enum fs_udp_type i)
{
	switch (i) {
	case FS_IPV4_UDP:
		return "UDP v4";
	default: /* FS_IPV6_UDP */
		return "UDP v6";
	}
}

static enum mlx5_traffic_types fs_udp2tt(enum fs_udp_type i)
{
	switch (i) {
	case FS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	default: /* FS_IPV6_UDP */
		return MLX5_TT_IPV6_UDP;
	}
}

static enum fs_udp_type tt2fs_udp(enum mlx5_traffic_types i)
{
	switch (i) {
	case MLX5_TT_IPV4_UDP:
		return FS_IPV4_UDP;
	case MLX5_TT_IPV6_UDP:
		return FS_IPV6_UDP;
	default:
		return FS_UDP_NUM_TYPES;
	}
}

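/* Remove a steering rule previously returned by one of the
 * mlx5e_fs_tt_redirect_*_add_rule() helpers below.
 */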
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

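/* Build a match spec for outer IPv4/IPv6 + UDP with the given destination
 * port. Only outer headers are matched.
 */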
static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type type,
				  u16 udp_dport)
{
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
		 type == FS_IPV4_UDP ? 4 : 6);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, udp_dport);
}

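/* Add a rule to the redirect table selected by @ttc_type (UDP v4 or v6)
 * that steers packets with destination port @d_port to the TIR @tir_num.
 * Returns the rule handle, or an ERR_PTR on failure (also logged).
 */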
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
				  enum mlx5_traffic_types ttc_type,
				  u32 tir_num, u16 d_port)
{
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	enum fs_udp_type type = tt2fs_udp(ttc_type);
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft = NULL;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	if (type == FS_UDP_NUM_TYPES)
		return ERR_PTR(-EINVAL);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ft = fs_udp->tables[type].t;

	fs_udp_set_dport_flow(spec, type, d_port);
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add %s rule failed, err %d\n",
		       __func__, fs_udp_type2str(type), err);
	}
	return rule;
}

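/* Catch-all rule: traffic not matched by a port-specific rule is forwarded
 * to the TTC table's default destination for this traffic type (normally
 * the indirect TIRs), preserving the regular datapath.
 */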
static int fs_udp_add_default_rule(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	struct mlx5e_flow_table *fs_udp_t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err;

	fs_udp_t = &fs_udp->tables[type];

	dest = mlx5_ttc_get_default_dest(ttc, fs_udp2tt(type));
	rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add default rule failed, fs type=%d, err %d\n",
		       __func__, type, err);
		return err;
	}

	fs_udp->default_rules[type] = rule;
	return 0;
}

#define MLX5E_FS_UDP_NUM_GROUPS	(2)
#define MLX5E_FS_UDP_GROUP1_SIZE	(BIT(16))
#define MLX5E_FS_UDP_GROUP2_SIZE	(BIT(0))
#define MLX5E_FS_UDP_TABLE_SIZE		(MLX5E_FS_UDP_GROUP1_SIZE +\
					 MLX5E_FS_UDP_GROUP2_SIZE)
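/* Each UDP table has two flow groups: group 1 holds up to 64K rules matching
 * on ip_protocol, ip_version and udp_dport; group 2 holds the single
 * catch-all default rule with an empty match.
 */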
static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_FS_UDP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		ft->g = NULL;
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);

	switch (type) {
	case FS_IPV4_UDP:
	case FS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}
	/* Match on udp protocol, Ipv4/6 and dport */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_UDP_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_UDP_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

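/* Create the flow table for one UDP type (v4 or v6), its flow groups and
 * its default rule. On failure the table is destroyed before returning.
 */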
static int fs_udp_create_table(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft;
	int err;

	ft = &fs_udp->tables[type];
	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
	ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs %s table id %u level %u\n",
		      fs_udp_type2str(type), ft->t->id, ft->t->level);

	err = fs_udp_create_groups(ft, type);
	if (err)
		goto err;

	err = fs_udp_add_default_rule(fs, type);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
{
	if (IS_ERR_OR_NULL(fs_udp->tables[i].t))
		return;

	mlx5_del_flow_rules(fs_udp->default_rules[i]);
	mlx5e_destroy_flow_table(&fs_udp->tables[i]);
	fs_udp->tables[i].t = NULL;
}

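/* Restore the TTC UDP entries to their default destinations so traffic no
 * longer passes through the redirect tables.
 */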
static int fs_udp_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
		/* Modify ttc rules destination to point back to the indir TIRs */
		err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, fs_udp2tt(i), err);
			return err;
		}
	}

	return 0;
}

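/* Point the TTC UDP entries at the redirect tables so UDP traffic is first
 * classified by destination port before falling back to the default path.
 */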
static int fs_udp_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
		dest.ft = udp->tables[i].t;

		/* Modify ttc rules destination to point on the accel_fs FTs */
		err = mlx5_ttc_fwd_dest(ttc, fs_udp2tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
			       __func__, fs_udp2tt(i), err);
			return err;
		}
	}
	return 0;
}

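/* Reference-counted teardown: only the last user disables the TTC redirect
 * and destroys the per-type UDP tables.
 */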
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
	int i;

	if (!fs_udp)
		return;

	if (--fs_udp->ref_cnt)
		return;

	fs_udp_disable(fs);

	for (i = 0; i < FS_UDP_NUM_TYPES; i++)
		fs_udp_destroy_table(fs_udp, i);

	kfree(fs_udp);
	mlx5e_fs_set_udp(fs, NULL);
}

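/* Reference-counted setup: the first caller allocates the context, creates
 * one table per UDP type and redirects the TTC UDP entries to them;
 * subsequent callers only take a reference.
 */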
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
	int i, err;

	if (udp) {
		udp->ref_cnt++;
		return 0;
	}

	udp = kzalloc(sizeof(*udp), GFP_KERNEL);
	if (!udp)
		return -ENOMEM;
	mlx5e_fs_set_udp(fs, udp);

	for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
		err = fs_udp_create_table(fs, i);
		if (err)
			goto err_destroy_tables;
	}

	err = fs_udp_enable(fs);
	if (err)
		goto err_destroy_tables;

	udp->ref_cnt = 1;

	return 0;

err_destroy_tables:
	while (--i >= 0)
		fs_udp_destroy_table(udp, i);

	kfree(udp);
	mlx5e_fs_set_udp(fs, NULL);
	return err;
}

static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_type)
{
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ether_type);
}

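/* Add a rule to the ANY redirect table that steers packets with the given
 * ethertype to the TIR @tir_num.
 */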
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
				  u32 tir_num, u16 ether_type)
{
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft = NULL;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ft = fs_any->table.t;

	fs_any_set_ethertype_flow(spec, ether_type);
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add ANY rule failed, err %d\n",
		       __func__, err);
	}
	return rule;
}

static int fs_any_add_default_rule(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	struct mlx5e_flow_table *fs_any_t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err;

	fs_any_t = &fs_any->table;
	dest = mlx5_ttc_get_default_dest(ttc, MLX5_TT_ANY);
	rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs_err(fs, "%s: add default rule failed, fs type=ANY, err %d\n",
		       __func__, err);
		return err;
	}

	fs_any->default_rule = rule;
	return 0;
}

#define MLX5E_FS_ANY_NUM_GROUPS	(2)
#define MLX5E_FS_ANY_GROUP1_SIZE	(BIT(16))
#define MLX5E_FS_ANY_GROUP2_SIZE	(BIT(0))
#define MLX5E_FS_ANY_TABLE_SIZE		(MLX5E_FS_ANY_GROUP1_SIZE +\
					 MLX5E_FS_ANY_GROUP2_SIZE)

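/* As with the UDP tables: one large group matching on ethertype and a
 * one-entry group for the catch-all default rule.
 */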
static int fs_any_create_groups(struct mlx5e_flow_table *ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_FS_ANY_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		ft->g = NULL;
		kvfree(in);
		return -ENOMEM;
	}

	/* Match on ethertype */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_ANY_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Default Flow Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_FS_ANY_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static int fs_any_create_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	struct mlx5e_flow_table *ft = &fs_any->table;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_FS_ANY_TABLE_SIZE;
	ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs ANY table id %u level %u\n",
		      ft->t->id, ft->t->level);

	err = fs_any_create_groups(ft);
	if (err)
		goto err;

	err = fs_any_add_default_rule(fs);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static int fs_any_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err;

	/* Modify ttc rules destination to point back to the indir TIRs */
	err = mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_ANY);
	if (err) {
		fs_err(fs,
		       "%s: modify ttc[%d] default destination failed, err(%d)\n",
		       __func__, MLX5_TT_ANY, err);
		return err;
	}
	return 0;
}

static int fs_any_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_fs_any *any = mlx5e_fs_get_any(fs);
	struct mlx5_flow_destination dest = {};
	int err;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = any->table.t;

	/* Modify ttc rules destination to point on the accel_fs FTs */
	err = mlx5_ttc_fwd_dest(ttc, MLX5_TT_ANY, &dest);
	if (err) {
		fs_err(fs,
		       "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
		       __func__, MLX5_TT_ANY, err);
		return err;
	}
	return 0;
}

static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
{
	if (IS_ERR_OR_NULL(fs_any->table.t))
		return;

	mlx5_del_flow_rules(fs_any->default_rule);
	mlx5e_destroy_flow_table(&fs_any->table);
	fs_any->table.t = NULL;
}

void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);

	if (!fs_any)
		return;

	if (--fs_any->ref_cnt)
		return;

	fs_any_disable(fs);

	fs_any_destroy_table(fs_any);

	kfree(fs_any);
	mlx5e_fs_set_any(fs, NULL);
}

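/* Reference-counted setup of the ANY (ethertype) redirect table, mirroring
 * mlx5e_fs_tt_redirect_udp_create() above.
 */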
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
	int err;

	if (fs_any) {
		fs_any->ref_cnt++;
		return 0;
	}

	fs_any = kzalloc(sizeof(*fs_any), GFP_KERNEL);
	if (!fs_any)
		return -ENOMEM;
	mlx5e_fs_set_any(fs, fs_any);

	err = fs_any_create_table(fs);
	if (err)
		goto err_free_any;

	err = fs_any_enable(fs);
	if (err)
		goto err_destroy_table;

	fs_any->ref_cnt = 1;

	return 0;

err_destroy_table:
	fs_any_destroy_table(fs_any);
err_free_any:
	mlx5e_fs_set_any(fs, NULL);
	kfree(fs_any);
	return err;
}