// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/fs_ttc.h"

#define MLX5_TTC_NUM_GROUPS	3
#define MLX5_TTC_GROUP1_SIZE	(BIT(3) + MLX5_NUM_TUNNEL_TT)
#define MLX5_TTC_GROUP2_SIZE	 BIT(1)
#define MLX5_TTC_GROUP3_SIZE	 BIT(0)
#define MLX5_TTC_TABLE_SIZE	(MLX5_TTC_GROUP1_SIZE +\
				 MLX5_TTC_GROUP2_SIZE +\
				 MLX5_TTC_GROUP3_SIZE)

#define MLX5_INNER_TTC_NUM_GROUPS	3
#define MLX5_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5_INNER_TTC_TABLE_SIZE	(MLX5_INNER_TTC_GROUP1_SIZE +\
					 MLX5_INNER_TTC_GROUP2_SIZE +\
					 MLX5_INNER_TTC_GROUP3_SIZE)

/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
	struct mlx5_ttc_rule rules[MLX5_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
};

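/* Return the flow table backing this TTC instance. */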
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
	return ttc->t;
}

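/*
 * Delete every installed TTC rule, both regular and tunnel traffic
 * types, clearing the stored handles so the cleanup is idempotent.
 */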
static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

struct mlx5_etype_proto {
	u16 etype;
	u8 proto;
};

static struct mlx5_etype_proto ttc_rules[] = {
	[MLX5_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5_etype_proto ttc_tunnel_rules[] = {
	[MLX5_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},
};

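/* Return the IP protocol matched by the given tunnel traffic type. */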
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}

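/*
 * Check the device Ethernet caps for stateless RX offload support of
 * the given tunnel IP protocol.
 */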
static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
					   u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}

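/* True if the device supports RX offload for at least one tunnel protocol. */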
static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5_tunnel_proto_supported_rx(mdev,
						   ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}

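/*
 * Classifying on inner (encapsulated) headers requires both a supported
 * tunnel protocol and the ability to match on the inner IP version field.
 */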
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version));
}

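/* Map an ethertype to its IP version number, or 0 for non-IP. */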
static u8 mlx5_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

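/*
 * Build a single classification rule on the outer headers: match the IP
 * protocol when given, and the IP version when the device supports it
 * (falling back to the ethertype otherwise), steering hits to @dest.
 */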
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
		       struct mlx5_flow_destination *dest, u16 etype, u8 proto)
{
	int match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_protocol, proto);
	}

	ipv = mlx5_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

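/*
 * Install one rule per traffic type, skipping those marked in
 * @params->ignore_dests, and, when inner TTC is enabled and supported,
 * one rule per tunnel traffic type. All rules are rolled back on error.
 */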
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
					 struct ttc_params *params,
					 struct mlx5_ttc_table *ttc)
{
	struct mlx5_flow_handle **trules;
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->t;
	rules = ttc->rules;
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
		return 0;

	trules = ttc->tunnel_rules;
	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (!mlx5_tunnel_proto_supported_rx(dev,
						    ttc_tunnel_rules[tt].proto))
			continue;
		if (test_bit(tt, params->ignore_tunnel_dests))
			continue;
		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
						    &params->tunnel_dests[tt],
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

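/*
 * Split the TTC table into three flow groups: an L4 group matching IP
 * version (or ethertype) plus IP protocol, an L3 group matching IP
 * version (or ethertype) only, and a final catch-all group.
 */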
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
					bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

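/*
 * Inner-header counterpart of mlx5_generate_ttc_rule(): matches the
 * inner IP version and protocol. There is no ethertype fallback here;
 * inner ip_version support is a prerequisite for the inner table.
 */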
static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *ft,
			     struct mlx5_flow_destination *dest,
			     u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value,
			 inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

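/*
 * Install one inner-header rule per traffic type, skipping those marked
 * in @params->ignore_dests; rolls back all installed rules on error.
 */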
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
					       struct ttc_params *params,
					       struct mlx5_ttc_table *ttc)
{
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->t;
	rules = ttc->rules;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
							  &params->dests[tt],
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

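/*
 * Create the three inner TTC flow groups: L4 (inner IP version +
 * protocol), L3 (inner IP version only) and a catch-all.
 */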
static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
			 GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

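/*
 * Allocate the inner TTC table, carve its flow groups and populate the
 * classification rules. The table is destroyed again on any failure.
 */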
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
						   struct ttc_params *params)
{
	struct mlx5_ttc_table *ttc;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_inner_ttc_table_groups(ttc);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

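/* Tear down rules, flow groups and the flow table, in that order. */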
void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
	int i;

	mlx5_cleanup_ttc_rules(ttc);
	for (i = ttc->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ttc->g[i]))
			mlx5_destroy_flow_group(ttc->g[i]);
		ttc->g[i] = NULL;
	}

	kfree(ttc->g);
	mlx5_destroy_flow_table(ttc->t);
	kvfree(ttc);
}

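/*
 * Allocate the outer TTC table, carve its flow groups and populate the
 * classification rules, matching on the outer IP version when the
 * device supports it. The table is destroyed again on any failure.
 */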
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
					     struct ttc_params *params)
{
	bool match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	struct mlx5_ttc_table *ttc;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_ttc_table_rules(dev, params, ttc);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

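/* Redirect the rule for traffic type @type to @new_dest. */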
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
		      struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
					    NULL);
}

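/*
 * Return the destination the rule for @type was created with; warns if
 * the rule was never set up.
 */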
struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
			  enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}

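/* Restore the rule for @type to its original (creation-time) destination. */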
int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
			      enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);

	return mlx5_ttc_fwd_dest(ttc, type, &dest);
}