/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
#include "mp.h"

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_SPINLOCK(lag_lock);

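/* Create the hardware LAG via the CREATE_LAG firmware command. When the
 * hash-based port selection flag is not set, the explicit tx_remap_affinity
 * values steer each virtual port to a physical port; otherwise firmware is
 * told to use the port selection flow table mode.
 */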
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2, bool shared_fdb, u8 flags)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
	if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
	} else {
		MLX5_SET(lagc, lag_ctx, port_select_mode,
			 MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
	}

	return mlx5_cmd_exec_in(dev, create_lag, in);
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);

static void mlx5_ldev_free(struct kref *ref)
{
	struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);

	if (ldev->nb.notifier_call)
		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
	mlx5_lag_mp_cleanup(ldev);
	cancel_delayed_work_sync(&ldev->bond_work);
	destroy_workqueue(ldev->wq);
	kfree(ldev);
}

static void mlx5_ldev_put(struct mlx5_lag *ldev)
{
	kref_put(&ldev->ref, mlx5_ldev_free);
}

static void mlx5_ldev_get(struct mlx5_lag *ldev)
{
	kref_get(&ldev->ref);
}

static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int err;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	kref_init(&ldev->ref);
	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	ldev->nb.notifier_call = mlx5_lag_netdev_event;
	if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
		ldev->nb.notifier_call = NULL;
		mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
	}

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);

	return ldev;
}

int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -ENOENT;
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}

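/* Derive the virtual-to-physical TX port mapping from the tracked bond
 * state. If both ports are usable (tx_enabled and link up) or neither is,
 * the identity mapping 1->1, 2->2 is kept; if only one port is usable,
 * both virtual ports are remapped to it.
 */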
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	bool p1en;
	bool p2en;

	p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
	       tracker->netdev_state[MLX5_LAG_P1].link_up;

	p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
	       tracker->netdev_state[MLX5_LAG_P2].link_up;

	*port1 = 1;
	*port2 = 2;
	if ((!p1en && !p2en) || (p1en && p2en))
		return;

	if (p1en)
		*port2 = 1;
	else
		*port1 = 2;
}

static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;

	if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
		return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
	return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
}

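/* Recompute the TX affinity mapping and push it to hardware, but only when
 * it differs from the currently programmed one. Hash-based mode is updated
 * through the port selection flow table, queue-affinity mode through the
 * MODIFY_LAG command.
 */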
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u8 v2p_port1, v2p_port2;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
				       &v2p_port2);

	if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
	    v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
		err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
		if (err) {
			mlx5_core_err(dev0,
				      "Failed to modify LAG (%d)\n",
				      err);
			return;
		}
		ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
		ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d\n",
			       ldev->v2p_map[MLX5_LAG_P1],
			       ldev->v2p_map[MLX5_LAG_P2]);
	}
}

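/* Select the port selection mode for the LAG being created. Hash-based
 * (flow-table) selection is used only for non-RoCE LAG, when the device
 * exposes the port_select_flow_table capability and the bond uses a hash
 * TX policy; otherwise queue affinity remains in effect.
 */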
static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
				       struct lag_tracker *tracker, u8 *flags)
{
	bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];

	if (roce_lag ||
	    !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
	    tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return;
	*flags |= MLX5_LAG_FLAG_HASH_BASED;
}

static char *get_str_port_sel_mode(u8 flags)
{
	if (flags & MLX5_LAG_FLAG_HASH_BASED)
		return "hash";
	return "queue_affinity";
}

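/* Create the LAG object in firmware using the previously inferred port
 * mapping. If shared FDB was requested, also switch both eswitches to a
 * single FDB; on failure the just-created LAG is destroyed again.
 */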
static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   bool shared_fdb, u8 flags)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;

	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s\n",
		       ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
		       shared_fdb, get_str_port_sel_mode(flags));

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
				  ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
	if (err) {
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
		return err;
	}

	if (shared_fdb) {
		err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
							      dev1->priv.eswitch);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
			mlx5_core_info(dev0, "Operation mode is single FDB\n");
	}

	if (err) {
		MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
		if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
	}

	return err;
}

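/* Activate LAG: compute the TX affinity mapping, pick the port selection
 * mode, create the port selection flow table if hash-based mode was chosen,
 * and then create the LAG in firmware. On success the mode flags and
 * shared_fdb state are recorded on the lag device.
 */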
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      u8 flags,
		      bool shared_fdb)
{
	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
				       &ldev->v2p_map[MLX5_LAG_P2]);
	mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
	if (flags & MLX5_LAG_FLAG_HASH_BASED) {
		err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
					       ldev->v2p_map[MLX5_LAG_P1],
					       ldev->v2p_map[MLX5_LAG_P2]);
		if (err) {
			mlx5_core_err(dev0,
				      "Failed to create LAG port selection(%d)\n",
				      err);
			return err;
		}
	}

	err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
	if (err) {
		if (flags & MLX5_LAG_FLAG_HASH_BASED)
			mlx5_lag_port_sel_destroy(ldev);
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	ldev->flags |= flags;
	ldev->shared_fdb = shared_fdb;
	return 0;
}

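/* Tear down the LAG: clear the mode flags, reset multipath and shared-FDB
 * state, destroy the LAG in firmware and, for hash-based mode, remove the
 * port selection flow table.
 */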
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	u8 flags = ldev->flags;
	int err;

	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
	mlx5_lag_mp_reset(ldev);

	if (ldev->shared_fdb) {
		mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
							 ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
		ldev->shared_fdb = false;
	}

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
	} else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
		mlx5_lag_port_sel_destroy(ldev);
	}

	return err;
}

static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
		return false;

#ifdef CONFIG_MLX5_ESWITCH
	return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
				   ldev->pf[MLX5_LAG_P2].dev);
#else
	return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
		!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}

static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

static void mlx5_disable_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	bool shared_fdb = ldev->shared_fdb;
	bool roce_lag;
	int err;

	roce_lag = __mlx5_lag_is_roce(ldev);

	if (shared_fdb) {
		mlx5_lag_remove_devices(ldev);
	} else if (roce_lag) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
		}
		mlx5_nic_vport_disable_roce(dev1);
	}

	err = mlx5_deactivate_lag(ldev);
	if (err)
		return;

	if (shared_fdb || roce_lag)
		mlx5_lag_add_devices(ldev);

	if (shared_fdb) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
		if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
	}
}

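/* Shared FDB is offered only when both devices are in switchdev mode with
 * vport match metadata enabled, the eswitch devcom pair is established,
 * and firmware exposes the native FDB selection, root-FT-on-other-esw and
 * shared ingress ACL capabilities.
 */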
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;

	if (is_mdev_switchdev_mode(dev0) &&
	    is_mdev_switchdev_mode(dev1) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
	    mlx5_devcom_is_paired(dev0->priv.devcom,
				  MLX5_DEVCOM_ESW_OFFLOADS) &&
	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
		return true;

	return false;
}

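/* Reconcile the LAG state with the tracked bond state: activate LAG
 * (RoCE LAG when SR-IOV is disabled on both ports and both eswitches are
 * in NONE mode, VF LAG otherwise), update the port mapping of an already
 * active LAG, or disable LAG when the bond no longer qualifies.
 */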
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	struct lag_tracker tracker;
	bool do_bond, roce_lag;
	int err;

	if (!mlx5_lag_is_ready(ldev)) {
		do_bond = false;
	} else {
		/* VF LAG is in multipath mode, ignore bond change requests */
		if (mlx5_lag_is_multipath(dev0))
			return;

		tracker = ldev->tracker;

		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
	}

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		bool shared_fdb = mlx5_shared_fdb_supported(ldev);

		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
			   !mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
		roce_lag = roce_lag &&
			   dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
			   dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif

		if (shared_fdb || roce_lag)
			mlx5_lag_remove_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_FLAG_ROCE :
						   MLX5_LAG_FLAG_SRIOV,
					shared_fdb);
		if (err) {
			if (shared_fdb || roce_lag)
				mlx5_lag_add_devices(ldev);

			return;
		} else if (roce_lag) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
			mlx5_nic_vport_enable_roce(dev1);
		} else if (shared_fdb) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);

			err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
			if (!err)
				err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);

			if (err) {
				dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
				mlx5_rescan_drivers_locked(dev0);
				mlx5_deactivate_lag(ldev);
				mlx5_lag_add_devices(ldev);
				mlx5_eswitch_reload_reps(dev0->priv.eswitch);
				mlx5_eswitch_reload_reps(dev1->priv.eswitch);
				mlx5_core_err(dev0, "Failed to enable lag\n");
				return;
			}
		}
	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_disable_lag(ldev);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}

static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
				    struct mlx5_core_dev *dev1)
{
	if (dev0)
		mlx5_esw_lock(dev0->priv.eswitch);
	if (dev1)
		mlx5_esw_lock(dev1->priv.eswitch);
}

static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
				      struct mlx5_core_dev *dev1)
{
	if (dev1)
		mlx5_esw_unlock(dev1->priv.eswitch);
	if (dev0)
		mlx5_esw_unlock(dev0->priv.eswitch);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	if (ldev->mode_changes_in_progress) {
		mlx5_dev_list_unlock();
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_lag_lock_eswitches(dev0, dev1);
	mlx5_do_bond(ldev);
	mlx5_lag_unlock_eswitches(dev0, dev1);
	mlx5_dev_list_unlock();
}

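/* Handle NETDEV_CHANGEUPPER: decide whether this device pair is now bonded
 * (both PF netdevs, and only them, enslaved to the same LAG master with a
 * supported TX policy) and return 1 if the tracker state changed so that
 * the bond work should run.
 */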
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded, is_in_lag, mode_supported;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx >= 0)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info) {
		tracker->tx_type = lag_upper_info->tx_type;
		tracker->hash_type = lag_upper_info->hash_type;
	}

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 */
	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;

	if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
		return 0;
	}

	/* Lag mode must be activebackup or hash. */
	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

	if (is_in_lag && !mode_supported)
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, TX type isn't supported");

	is_bonded = is_in_lag && mode_supported;
	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}

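/* Handle NETDEV_CHANGELOWERSTATE: record the per-port link and TX-enable
 * state for one of our slaves; this feeds the TX affinity mapping.
 * Returns 1 so the bond work re-evaluates the mapping.
 */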
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx < 0)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev    = container_of(this, struct mlx5_lag, nb);

	if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
		return NOTIFY_DONE;

	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	ldev->tracker = tracker;

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
				 struct mlx5_core_dev *dev,
				 struct net_device *netdev)
{
	unsigned int fn = mlx5_get_dev_index(dev);

	if (fn >= MLX5_MAX_PORTS)
		return;

	spin_lock(&lag_lock);
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;
	spin_unlock(&lag_lock);
}

static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
				    struct net_device *netdev)
{
	int i;

	spin_lock(&lag_lock);
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (ldev->pf[i].netdev == netdev) {
			ldev->pf[i].netdev = NULL;
			break;
		}
	}
	spin_unlock(&lag_lock);
}

static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
			       struct mlx5_core_dev *dev)
{
	unsigned int fn = mlx5_get_dev_index(dev);

	if (fn >= MLX5_MAX_PORTS)
		return;

	ldev->pf[fn].dev = dev;
	dev->priv.lag = ldev;
}

/* Must be called with intf_mutex held */
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
				  struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	ldev->pf[i].dev = NULL;
	dev->priv.lag = NULL;
}

/* Must be called with intf_mutex held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
		return 0;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc(dev);
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return 0;
		}
	} else {
		if (ldev->mode_changes_in_progress)
			return -EAGAIN;
		mlx5_ldev_get(ldev);
	}

	mlx5_ldev_add_mdev(ldev, dev);

	return 0;
}

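/* Detach a core device from its lag struct. If a mode change is in
 * progress, release the dev list lock and retry until it completes, then
 * drop the device's reference on the lag struct.
 */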
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

recheck:
	mlx5_dev_list_lock();
	if (ldev->mode_changes_in_progress) {
		mlx5_dev_list_unlock();
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_remove_mdev(ldev, dev);
	mlx5_dev_list_unlock();
	mlx5_ldev_put(ldev);
}

void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
	int err;

recheck:
	mlx5_dev_list_lock();
	err = __mlx5_lag_dev_add_mdev(dev);
	if (err) {
		mlx5_dev_list_unlock();
		msleep(100);
		goto recheck;
	}
	mlx5_dev_list_unlock();
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
			    struct net_device *netdev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_ldev_remove_netdev(ldev, netdev);
	ldev->flags &= ~MLX5_LAG_FLAG_READY;

	if (__mlx5_lag_is_active(ldev))
		mlx5_queue_bond_work(ldev, 0);
}

/* Must be called with intf_mutex held */
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
			 struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_ldev_add_netdev(ldev, dev, netdev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (!ldev->pf[i].dev)
			break;

	if (i >= MLX5_MAX_PORTS)
		ldev->flags |= MLX5_LAG_FLAG_READY;
	mlx5_queue_bond_work(ldev, 0);
}

bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res  = ldev && __mlx5_lag_is_roce(ldev);
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res  = ldev && __mlx5_lag_is_active(ldev);
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev) &&
		dev == ldev->pf[MLX5_LAG_P1].dev;
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_master);

bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res  = ldev && __mlx5_lag_is_sriov(ldev);
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);

bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);

void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *dev0;
	struct mlx5_core_dev *dev1;
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_dev_list_lock();

	dev0 = ldev->pf[MLX5_LAG_P1].dev;
	dev1 = ldev->pf[MLX5_LAG_P2].dev;

	ldev->mode_changes_in_progress++;
	if (__mlx5_lag_is_active(ldev)) {
		mlx5_lag_lock_eswitches(dev0, dev1);
		mlx5_disable_lag(ldev);
		mlx5_lag_unlock_eswitches(dev0, dev1);
	}
	mlx5_dev_list_unlock();
}

void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_dev_list_lock();
	ldev->mode_changes_in_progress--;
	mlx5_dev_list_unlock();
	mlx5_queue_bond_work(ldev, 0);
}

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
		       ldev->pf[MLX5_LAG_P1].netdev :
		       ldev->pf[MLX5_LAG_P2].netdev;
	} else {
		ndev = ldev->pf[MLX5_LAG_P1].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	spin_unlock(&lag_lock);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave)
{
	struct mlx5_lag *ldev;
	u8 port = 0;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->pf[MLX5_LAG_P1].netdev == slave)
		port = MLX5_LAG_P1;
	else
		port = MLX5_LAG_P2;

	port = ldev->v2p_map[port];

unlock:
	spin_unlock(&lag_lock);
	return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);

struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer_dev = NULL;
	struct mlx5_lag *ldev;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		goto unlock;

	peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
			   ldev->pf[MLX5_LAG_P2].dev :
			   ldev->pf[MLX5_LAG_P1].dev;

unlock:
	spin_unlock(&lag_lock);
	return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);

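/* Query congestion statistics and accumulate them into @values. When LAG
 * is active the counters of both member devices are summed so the caller
 * sees totals for the bond; otherwise only the given device is queried.
 */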
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
	struct mlx5_lag *ldev;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(values, 0, sizeof(*values) * num_counters);

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	if (ldev && __mlx5_lag_is_active(ldev)) {
		num_ports = MLX5_MAX_PORTS;
		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
	} else {
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock(&lag_lock);

	for (i = 0; i < num_ports; ++i) {
		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

		MLX5_SET(query_cong_statistics_in, in, opcode,
			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
					  out);
		if (ret)
			goto free;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

free:
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);