// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/netdevice.h>
#include "lag.h"

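/*
 * Flow table levels inside the PORT_SEL namespace. Traffic is first
 * classified by the outer TTC table, optionally re-classified by the
 * inner TTC table for tunneled packets, and finally spread across the
 * uplinks by a per-traffic-type definer (hash) table.
 */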
enum {
	MLX5_LAG_FT_LEVEL_TTC,
	MLX5_LAG_FT_LEVEL_INNER_TTC,
	MLX5_LAG_FT_LEVEL_DEFINER,
};

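/*
 * Create a single HASH_SPLIT flow group spanning all @rules entries of
 * @ft; with a hash-split group the device selects a flow index by
 * hashing the packet fields picked out by @definer, so the rules
 * themselves carry no match criteria.
 */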
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
			    struct mlx5_flow_definer *definer,
			    u8 rules)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_definer_id,
		 mlx5_get_match_definer_id(definer));
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, rules - 1);
	MLX5_SET(create_flow_group_in, in, group_type,
		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);

	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);
	return fg;
}

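/*
 * Build the port selection table for one definer: ports * buckets
 * entries, each forwarding to the uplink of the port currently mapped
 * to that (port, bucket) slot in @ports.
 */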
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
					  struct mlx5_lag_definer *lag_definer,
					  u8 *ports)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_namespace *ns;
	int err, i;
	int idx;
	int j;

	ft_attr.max_fte = ldev->ports * ldev->buckets;
	ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	if (!ns) {
		mlx5_core_warn(dev, "Failed to get port selection namespace\n");
		return -EOPNOTSUPP;
	}

	lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(lag_definer->ft)) {
		mlx5_core_warn(dev, "Failed to create port selection table\n");
		return PTR_ERR(lag_definer->ft);
	}

	lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
						      lag_definer->definer,
						      ft_attr.max_fte);
	if (IS_ERR(lag_definer->fg)) {
		err = PTR_ERR(lag_definer->fg);
		goto destroy_ft;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			u8 affinity;

			idx = i * ldev->buckets + j;
			affinity = ports[idx];

			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
							  vhca_id);
			lag_definer->rules[idx] = mlx5_add_flow_rules(lag_definer->ft,
								      NULL, &flow_act,
								      &dest, 1);
			if (IS_ERR(lag_definer->rules[idx])) {
				err = PTR_ERR(lag_definer->rules[idx]);
				/* Unwind every rule installed so far:
				 * finish the partially filled row, then
				 * delete each complete preceding row.
				 */
				do {
					while (j--) {
						idx = i * ldev->buckets + j;
						mlx5_del_flow_rules(lag_definer->rules[idx]);
					}
					j = ldev->buckets;
				} while (i--);
				goto destroy_fg;
			}
		}
	}

	return 0;

destroy_fg:
	mlx5_destroy_flow_group(lag_definer->fg);
destroy_ft:
	mlx5_destroy_flow_table(lag_definer->ft);
	return err;
}

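/*
 * Select the fields to hash on for the *inner* (post-decap) headers
 * and set them in @match_definer_mask. Returns the HW definer format
 * id that carries the chosen fields.
 */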
static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
				      enum mlx5_traffic_types tt)
{
	int format_id;
	u8 *ipv6;

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 31;
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 32;
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_15_0);
		break;
	default:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		break;
	}

	return format_id;
}

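/*
 * As above for the outer headers, delegating to the inner variant for
 * tunneled traffic. The default case hashes on L2 only: smac plus VLAN
 * id for NETDEV_LAG_HASH_VLAN_SRCMAC, otherwise smac/dmac/ethertype.
 */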
static int mlx5_lag_set_definer(u32 *match_definer_mask,
				enum mlx5_traffic_types tt, bool tunnel,
				enum netdev_lag_hash hash)
{
	int format_id;
	u8 *ipv6;

	if (tunnel)
		return mlx5_lag_set_definer_inner(match_definer_mask, tt);

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 29;
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 30;
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_15_0);
		break;
	default:
		format_id = 0;
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_15_0);

		if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
			MLX5_SET_TO_ONES(match_definer_format_0,
					 match_definer_mask,
					 outer_first_vlan_vid);
			break;
		}

		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_ethertype);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_15_0);
		break;
	}

	return format_id;
}

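/*
 * Create one definer: program the match mask into the device and build
 * the hash table that spreads the matching traffic across the uplinks.
 */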
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
			enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_definer *lag_definer;
	u32 *match_definer_mask;
	int format_id, err;

	lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
	if (!lag_definer)
		return ERR_PTR(-ENOMEM);

	match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
							match_mask),
				      GFP_KERNEL);
	if (!match_definer_mask) {
		err = -ENOMEM;
		goto free_lag_definer;
	}

	format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
	lag_definer->definer =
		mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					  format_id, match_definer_mask);
	if (IS_ERR(lag_definer->definer)) {
		err = PTR_ERR(lag_definer->definer);
		goto free_mask;
	}

	err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports);
	if (err)
		goto destroy_match_definer;

	kvfree(match_definer_mask);

	return lag_definer;

destroy_match_definer:
	mlx5_destroy_match_definer(dev, lag_definer->definer);
free_mask:
	kvfree(match_definer_mask);
free_lag_definer:
	kfree(lag_definer);
	return ERR_PTR(err);
}

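/* Tear a definer down in reverse order of creation. */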
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
				     struct mlx5_lag_definer *lag_definer)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	int idx;
	int i;
	int j;

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			mlx5_del_flow_rules(lag_definer->rules[idx]);
		}
	}
	mlx5_destroy_flow_group(lag_definer->fg);
	mlx5_destroy_flow_table(lag_definer->ft);
	mlx5_destroy_match_definer(dev, lag_definer->definer);
	kfree(lag_definer);
}

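/*
 * Destroy every definer referenced by the tt_map; a slot may be NULL
 * when creation failed part way through.
 */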
static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		if (port_sel->outer.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->outer.definers[tt]);
		if (port_sel->inner.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->inner.definers[tt]);
	}
}

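/*
 * Create an outer definer for each traffic type in the tt_map, plus an
 * inner one when the hash policy also covers encapsulated headers.
 */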
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
				    enum netdev_lag_hash hash_type,
				    u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_lag_definer *lag_definer;
	int tt, err;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
						      false, ports);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->outer.definers[tt] = lag_definer;

		if (!port_sel->tunnel)
			continue;

		lag_definer =
			mlx5_lag_create_definer(ldev, hash_type, tt,
						true, ports);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->inner.definers[tt] = lag_definer;
	}

	return 0;

destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

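/*
 * Translate the bonding xmit hash policy into the set of traffic types
 * that need their own hash table. The encapsulated policies (E23/E34)
 * additionally enable inner-header classification.
 */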
static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
		       enum netdev_lag_hash hash)
{
	port_sel->tunnel = false;

	switch (hash) {
	case NETDEV_LAG_HASH_E34:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L34:
		set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	case NETDEV_LAG_HASH_E23:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L23:
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	default:
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	}
}

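/*
 * Traffic types that got no definer must be flagged as ignored so the
 * TTC table is created without destinations for them.
 */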
#define SET_IGNORE_DESTS_BITS(tt_map, dests)				\
	do {								\
		int idx;						\
									\
		for_each_clear_bit(idx, tt_map, MLX5_NUM_TT)		\
			set_bit(idx, dests);				\
	} while (0)

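/*
 * Point each classified inner traffic type at the flow table of its
 * inner definer.
 */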
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns = mlx5_get_flow_namespace(dev,
						 MLX5_FLOW_NAMESPACE_PORT_SEL);
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
}

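/*
 * Same as above for the outer TTC table. Tunneled traffic types are
 * additionally steered to the inner TTC table for a second
 * classification pass on the inner headers.
 */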
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns = mlx5_get_flow_namespace(dev,
						 MLX5_FLOW_NAMESPACE_PORT_SEL);
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);

	ttc_params->inner_ttc = port_sel->tunnel;
	if (!port_sel->tunnel)
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(port_sel->inner.ttc);
	}
}

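/*
 * The outer and inner TTC tables are created through the common ttc
 * code, parameterized above to live in the port selection namespace.
 */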
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};

	mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
	port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
	if (IS_ERR(port_sel->outer.ttc))
		return PTR_ERR(port_sel->outer.ttc);

	return 0;
}

static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};

	mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
	port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
	if (IS_ERR(port_sel->inner.ttc))
		return PTR_ERR(port_sel->inner.ttc);

	return 0;
}

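/*
 * Build the whole port selection pipeline. Creation order is bottom
 * up (definers, then inner TTC, then outer TTC) so that each table's
 * destinations already exist when it is created.
 */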
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
			     enum netdev_lag_hash hash_type, u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	set_tt_map(port_sel, hash_type);
	err = mlx5_lag_create_definers(ldev, hash_type, ports);
	if (err)
		return err;

	if (port_sel->tunnel) {
		err = mlx5_lag_create_inner_ttc_table(ldev);
		if (err)
			goto destroy_definers;
	}

	err = mlx5_lag_create_ttc_table(ldev);
	if (err)
		goto destroy_inner;

	return 0;

destroy_inner:
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

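/*
 * Update the destination uplink of the rules whose (port, bucket)
 * mapping changed; untouched buckets keep their current rule.
 */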
static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
						   struct mlx5_lag_definer *def,
						   u8 *ports)
{
	struct mlx5_flow_destination dest = {};
	int idx;
	int err;
	int i;
	int j;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			/* Compare per bucket, not per port; only rules
			 * whose mapping actually changed are modified.
			 */
			if (ldev->v2p_map[idx] == ports[idx])
				continue;

			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
							  vhca_id);
			err = mlx5_modify_rule_destination(def->rules[idx], &dest, NULL);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
				      struct mlx5_lag_definer **definers,
				      u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		err = __mlx5_lag_modify_definers_destinations(ldev, definers[tt], ports);
		if (err)
			return err;
	}

	return 0;
}

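/*
 * Apply a new port-to-uplink mapping to all outer definers and, for
 * tunnel-aware hash policies, to the inner definers as well.
 */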
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	err = mlx5_lag_modify_definers_destinations(ldev,
						    port_sel->outer.definers,
						    ports);
	if (err)
		return err;

	if (!port_sel->tunnel)
		return 0;

	return mlx5_lag_modify_definers_destinations(ldev,
						     port_sel->inner.definers,
						     ports);
}

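/* Tear down in reverse order of mlx5_lag_port_sel_create(). */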
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;

	mlx5_destroy_ttc_table(port_sel->outer.ttc);
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
	mlx5_lag_destroy_definers(ldev);
	memset(port_sel, 0, sizeof(*port_sel));
}