13d677735SMaor Gottlieb /*
23d677735SMaor Gottlieb * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
33d677735SMaor Gottlieb *
43d677735SMaor Gottlieb * This software is available to you under a choice of one of two
53d677735SMaor Gottlieb * licenses. You may choose to be licensed under the terms of the GNU
63d677735SMaor Gottlieb * General Public License (GPL) Version 2, available from the file
73d677735SMaor Gottlieb * COPYING in the main directory of this source tree, or the
83d677735SMaor Gottlieb * OpenIB.org BSD license below:
93d677735SMaor Gottlieb *
103d677735SMaor Gottlieb * Redistribution and use in source and binary forms, with or
113d677735SMaor Gottlieb * without modification, are permitted provided that the following
123d677735SMaor Gottlieb * conditions are met:
133d677735SMaor Gottlieb *
143d677735SMaor Gottlieb * - Redistributions of source code must retain the above
153d677735SMaor Gottlieb * copyright notice, this list of conditions and the following
163d677735SMaor Gottlieb * disclaimer.
173d677735SMaor Gottlieb *
183d677735SMaor Gottlieb * - Redistributions in binary form must reproduce the above
193d677735SMaor Gottlieb * copyright notice, this list of conditions and the following
203d677735SMaor Gottlieb * disclaimer in the documentation and/or other materials
213d677735SMaor Gottlieb * provided with the distribution.
223d677735SMaor Gottlieb *
233d677735SMaor Gottlieb * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
243d677735SMaor Gottlieb * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
253d677735SMaor Gottlieb * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
263d677735SMaor Gottlieb * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
273d677735SMaor Gottlieb * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
283d677735SMaor Gottlieb * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
293d677735SMaor Gottlieb * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
303d677735SMaor Gottlieb * SOFTWARE.
313d677735SMaor Gottlieb */
323d677735SMaor Gottlieb
333d677735SMaor Gottlieb #include <linux/netdevice.h>
3454493a08SMark Bloch #include <net/bonding.h>
353d677735SMaor Gottlieb #include <linux/mlx5/driver.h>
363d677735SMaor Gottlieb #include <linux/mlx5/eswitch.h>
373d677735SMaor Gottlieb #include <linux/mlx5/vport.h>
383d677735SMaor Gottlieb #include "lib/devcom.h"
393d677735SMaor Gottlieb #include "mlx5_core.h"
403d677735SMaor Gottlieb #include "eswitch.h"
416cb87869SMark Bloch #include "esw/acl/ofld.h"
423d677735SMaor Gottlieb #include "lag.h"
433d677735SMaor Gottlieb #include "mp.h"
4494db3317SEli Cohen #include "mpesw.h"
453d677735SMaor Gottlieb
464f455143SMark Bloch enum {
474f455143SMark Bloch MLX5_LAG_EGRESS_PORT_1 = 1,
484f455143SMark Bloch MLX5_LAG_EGRESS_PORT_2,
494f455143SMark Bloch };
504f455143SMark Bloch
513d677735SMaor Gottlieb /* General purpose, use for short periods of time.
523d677735SMaor Gottlieb * Beware of lock dependencies (preferably, no locks should be acquired
533d677735SMaor Gottlieb * under it).
543d677735SMaor Gottlieb */
553d677735SMaor Gottlieb static DEFINE_SPINLOCK(lag_lock);
563d677735SMaor Gottlieb
get_port_sel_mode(enum mlx5_lag_mode mode,unsigned long flags)57ef9a3a4aSEli Cohen static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
583d677735SMaor Gottlieb {
59ef9a3a4aSEli Cohen if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
60ef9a3a4aSEli Cohen return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT;
61ef9a3a4aSEli Cohen
6294db3317SEli Cohen if (mode == MLX5_LAG_MODE_MPESW)
6394db3317SEli Cohen return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW;
6494db3317SEli Cohen
65ef9a3a4aSEli Cohen return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
66ef9a3a4aSEli Cohen }
67ef9a3a4aSEli Cohen
/* Build a bitmask of the currently active tx ports as seen by the bond
 * tracker. Bit position i corresponds to the port at index i (via
 * BIT_MASK()); if no port is active, mlx5_infer_tx_enabled() falls back
 * to reporting the disabled set instead, so the mask is never empty.
 */
static u8 lag_active_port_bits(struct mlx5_lag *ldev)
{
	u8 enabled_ports[MLX5_MAX_PORTS] = {};
	u8 active_port = 0;
	int num_enabled;
	int idx;

	mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
			      &num_enabled);
	for (idx = 0; idx < num_enabled; idx++)
		active_port |= BIT_MASK(enabled_ports[idx]);

	return active_port;
}
82c5c13b45SLiu, Changcheng
/* Issue the CREATE_LAG firmware command.
 *
 * @dev:   device owning the LAG (the port-1 function)
 * @ports: tx affinity mapping; only the first two entries are used, and
 *         only in queue-affinity mode
 * @mode:  requested mlx5_lag_mode
 * @flags: MLX5_LAG_MODE_FLAG_* bits selecting FDB/port-selection behavior
 *
 * Returns 0 on success or a firmware command error code.
 */
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
			       unsigned long flags)
{
	bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
				     &flags);
	int port_sel_mode = get_port_sel_mode(mode, flags);
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx;

	lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);

	switch (port_sel_mode) {
	case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
		/* Firmware steers by the two tx_remap affinity fields */
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
		break;
	case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
		/* active_port is only programmable with the bypass cap */
		if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
			break;

		MLX5_SET(lagc, lag_ctx, active_port,
			 lag_active_port_bits(mlx5_lag_dev(dev)));
		break;
	default:
		break;
	}
	MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);

	return mlx5_cmd_exec_in(dev, create_lag, in);
}
1153d677735SMaor Gottlieb
/* Issue MODIFY_LAG with field_select 0x1 to reprogram the tx port
 * affinity mapping (queue-affinity mode). Only the first two entries of
 * @ports are consumed; @num_ports is currently unused by this helper.
 */
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
			       u8 *ports)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}
1303d677735SMaor Gottlieb
/* Issue the CREATE_VPORT_LAG firmware command. Exported (EXPORT_SYMBOL)
 * for use by other mlx5 consumers outside this module.
 */
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
1403d677735SMaor Gottlieb
/* Issue the DESTROY_VPORT_LAG firmware command; counterpart of
 * mlx5_cmd_create_vport_lag(). Exported (EXPORT_SYMBOL).
 */
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
1503d677735SMaor Gottlieb
/* Collect the indices of all ports that cannot carry tx traffic (either
 * tx disabled by the bond or link down) into @ports, and report how many
 * were found through @num_disabled.
 */
static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
				   u8 *ports, int *num_disabled)
{
	int cnt = 0;
	int i;

	for (i = 0; i < num_ports; i++) {
		bool usable = tracker->netdev_state[i].tx_enabled &&
			      tracker->netdev_state[i].link_up;

		if (!usable)
			ports[cnt++] = i;
	}

	*num_disabled = cnt;
}
163352899f3SMark Bloch
/* Collect the indices of all ports that can carry tx traffic (tx enabled
 * and link up) into @ports, reporting the count through @num_enabled.
 * If no port qualifies, fall back to returning the disabled set so the
 * caller always receives a non-empty port list.
 */
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
			   u8 *ports, int *num_enabled)
{
	int cnt = 0;
	int i;

	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			ports[cnt++] = i;
	}

	if (cnt)
		*num_enabled = cnt;
	else
		mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
}
179352899f3SMark Bloch
/* Log the current LAG port mapping.
 *
 * Hash-based mode: print the list of active ports. Each entry is written
 * as "%d, " and must render to exactly 3 characters, i.e. single-digit
 * port numbers; otherwise the function silently bails out. The trailing
 * ", " of the last entry is stripped before printing.
 *
 * Queue-affinity mode: print one " port %d:%d" entry (exactly 9 chars)
 * per bucket, mapping each logical port/bucket slot to its egress port.
 *
 * buf is sized for MLX5_MAX_PORTS ten-character entries plus a NUL.
 */
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
				   struct mlx5_lag *ldev,
				   struct lag_tracker *tracker,
				   unsigned long flags)
{
	char buf[MLX5_MAX_PORTS * 10 + 1] = {};
	u8 enabled_ports[MLX5_MAX_PORTS] = {};
	int written = 0;
	int num_enabled;
	int idx;
	int err;
	int i;
	int j;

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
		mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
				      &num_enabled);
		for (i = 0; i < num_enabled; i++) {
			/* Expect exactly "N, " — give up on wider output */
			err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
			if (err != 3)
				return;
			written += err;
		}
		/* Drop the trailing ", " */
		buf[written - 2] = 0;
		mlx5_core_info(dev, "lag map active ports: %s\n", buf);
	} else {
		for (i = 0; i < ldev->ports; i++) {
			for (j = 0; j < ldev->buckets; j++) {
				idx = i * ldev->buckets + j;
				err = scnprintf(buf + written, 10,
						" port %d:%d", i + 1, ldev->v2p_map[idx]);
				if (err != 9)
					return;
				written += err;
			}
		}
		mlx5_core_info(dev, "lag map:%s\n", buf);
	}
}
21924b3599eSMark Bloch
2203d677735SMaor Gottlieb static int mlx5_lag_netdev_event(struct notifier_block *this,
2213d677735SMaor Gottlieb unsigned long event, void *ptr);
2223d677735SMaor Gottlieb static void mlx5_do_bond_work(struct work_struct *work);
2233d677735SMaor Gottlieb
/* kref release callback: tear down and free a struct mlx5_lag.
 * Unregisters the netdev notifier first (only if registration succeeded,
 * i.e. notifier_call is non-NULL), then cleans up multipath state,
 * cancels any pending bond work and destroys the workqueue.
 */
static void mlx5_ldev_free(struct kref *ref)
{
	struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);

	if (ldev->nb.notifier_call)
		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
	mlx5_lag_mp_cleanup(ldev);
	cancel_delayed_work_sync(&ldev->bond_work);
	destroy_workqueue(ldev->wq);
	mutex_destroy(&ldev->lock);
	kfree(ldev);
}
2363d677735SMaor Gottlieb
/* Drop a reference on @ldev; frees it via mlx5_ldev_free() on last put. */
static void mlx5_ldev_put(struct mlx5_lag *ldev)
{
	kref_put(&ldev->ref, mlx5_ldev_free);
}
2413d677735SMaor Gottlieb
/* Take an additional reference on @ldev. */
static void mlx5_ldev_get(struct mlx5_lag *ldev)
{
	kref_get(&ldev->ref);
}
2463d677735SMaor Gottlieb
/* Allocate and initialize a struct mlx5_lag for @dev.
 *
 * Creates the bond workqueue, registers the netdev notifier (failure is
 * logged but not fatal; notifier_call is cleared so the free path skips
 * unregistering) and initializes multipath support (failure likewise only
 * logged). The port count comes from the num_lag_ports capability and the
 * bucket count defaults to 1.
 *
 * Returns the new ldev with one reference held, or NULL on allocation
 * failure.
 */
static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int err;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	kref_init(&ldev->ref);
	mutex_init(&ldev->lock);
	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	ldev->nb.notifier_call = mlx5_lag_netdev_event;
	if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
		/* Clear so mlx5_ldev_free() won't unregister a failed nb */
		ldev->nb.notifier_call = NULL;
		mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
	}
	ldev->mode = MLX5_LAG_MODE_NONE;

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);

	ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
	ldev->buckets = 1;

	return ldev;
}
2833d677735SMaor Gottlieb
mlx5_lag_dev_get_netdev_idx(struct mlx5_lag * ldev,struct net_device * ndev)2843d677735SMaor Gottlieb int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
2853d677735SMaor Gottlieb struct net_device *ndev)
2863d677735SMaor Gottlieb {
2873d677735SMaor Gottlieb int i;
2883d677735SMaor Gottlieb
2897e978e77SMark Bloch for (i = 0; i < ldev->ports; i++)
2903d677735SMaor Gottlieb if (ldev->pf[i].netdev == ndev)
2913d677735SMaor Gottlieb return i;
2923d677735SMaor Gottlieb
2933d677735SMaor Gottlieb return -ENOENT;
2943d677735SMaor Gottlieb }
2953d677735SMaor Gottlieb
/* True when the LAG is currently in RoCE mode. Caller must hold the
 * appropriate synchronization (ldev->mode is read without locking here).
 */
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return ldev->mode == MLX5_LAG_MODE_ROCE;
}
3003d677735SMaor Gottlieb
/* True when the LAG is currently in SRIOV mode. Same locking caveat as
 * __mlx5_lag_is_roce().
 */
static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return ldev->mode == MLX5_LAG_MODE_SRIOV;
}
3053d677735SMaor Gottlieb
306352899f3SMark Bloch /* Create a mapping between steering slots and active ports.
307352899f3SMark Bloch * As we have ldev->buckets slots per port first assume the native
308352899f3SMark Bloch * mapping should be used.
309352899f3SMark Bloch * If there are ports that are disabled fill the relevant slots
310352899f3SMark Bloch * with mapping that points to active ports.
311352899f3SMark Bloch */
/* Fill @ports with a steering-slot to egress-port mapping.
 *
 * @tracker:   bond state (per-port tx_enabled/link_up)
 * @num_ports: number of LAG ports
 * @buckets:   steering slots per port
 * @ports:     out array of num_ports * buckets 1-based egress ports
 *
 * Each port owns @buckets consecutive slots. Start from the native
 * mapping (every slot points at its own port) and, when some ports are
 * inactive, redirect each of their slots to a randomly chosen active
 * port so traffic keeps flowing.
 */
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 num_ports,
					   u8 buckets,
					   u8 *ports)
{
	int disabled[MLX5_MAX_PORTS] = {};
	int enabled[MLX5_MAX_PORTS] = {};
	int disabled_ports_num = 0;
	int enabled_ports_num = 0;
	int idx;
	u32 rand;
	int i;
	int j;

	/* Partition ports into usable (tx enabled + link up) and not */
	for (i = 0; i < num_ports; i++) {
		if (tracker->netdev_state[i].tx_enabled &&
		    tracker->netdev_state[i].link_up)
			enabled[enabled_ports_num++] = i;
		else
			disabled[disabled_ports_num++] = i;
	}

	/* Use native mapping by default where each port's buckets
	 * point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
	 */
	for (i = 0; i < num_ports; i++)
		for (j = 0; j < buckets; j++) {
			idx = i * buckets + j;
			ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
		}

	/* If all ports are disabled/enabled keep native mapping */
	if (enabled_ports_num == num_ports ||
	    disabled_ports_num == num_ports)
		return;

	/* Go over the disabled ports and for each assign a random active port */
	for (i = 0; i < disabled_ports_num; i++) {
		for (j = 0; j < buckets; j++) {
			/* sizeof(rand) instead of the magic constant 4 */
			get_random_bytes(&rand, sizeof(rand));
			ports[disabled[i] * buckets + j] = enabled[rand % enabled_ports_num] + 1;
		}
	}
}
3563d677735SMaor Gottlieb
mlx5_lag_has_drop_rule(struct mlx5_lag * ldev)3576cb87869SMark Bloch static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
3586cb87869SMark Bloch {
3597e978e77SMark Bloch int i;
3607e978e77SMark Bloch
3617e978e77SMark Bloch for (i = 0; i < ldev->ports; i++)
3627e978e77SMark Bloch if (ldev->pf[i].has_drop)
3637e978e77SMark Bloch return true;
3647e978e77SMark Bloch return false;
3656cb87869SMark Bloch }
3666cb87869SMark Bloch
/* Remove the uplink ingress drop rule from every port that has one and
 * clear the per-port has_drop flag.
 */
static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < ldev->ports; i++) {
		if (!ldev->pf[i].has_drop)
			continue;

		mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
							     MLX5_VPORT_UPLINK);
		ldev->pf[i].has_drop = false;
	}
}
3806cb87869SMark Bloch
/* Install uplink ingress drop rules on all currently inactive ports so
 * that traffic arriving on a backup/down port is dropped. Existing rules
 * are removed first; nothing is installed when the tracker reports no
 * inactive port. Per-port rule creation failure is logged but does not
 * abort the loop.
 */
static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
				     struct lag_tracker *tracker)
{
	u8 disabled_ports[MLX5_MAX_PORTS] = {};
	struct mlx5_core_dev *dev;
	int disabled_index;
	int num_disabled;
	int err;
	int i;

	/* First delete the current drop rule so there won't be any dropped
	 * packets
	 */
	mlx5_lag_drop_rule_cleanup(ldev);

	if (!ldev->tracker.has_inactive)
		return;

	mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);

	for (i = 0; i < num_disabled; i++) {
		disabled_index = disabled_ports[i];
		dev = ldev->pf[disabled_index].dev;
		err = mlx5_esw_acl_ingress_vport_drop_rule_create(dev->priv.eswitch,
								  MLX5_VPORT_UPLINK);
		if (!err)
			ldev->pf[disabled_index].has_drop = true;
		else
			mlx5_core_err(dev,
				      "Failed to create lag drop rule, error: %d", err);
	}
}
4136cb87869SMark Bloch
/* Issue MODIFY_LAG with field_select 0x2 to update the active-port
 * bitmask. @ports is a bitmask (as built by lag_active_port_bits()),
 * not a port count.
 */
static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx;

	lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x2);

	MLX5_SET(lagc, lag_ctx, active_port, ports);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}
428c5c13b45SLiu, Changcheng
/* Apply a new slot-to-port mapping.
 * Hash-based mode: update the port selection flow table, and when the
 * port_select_flow_table_bypass cap is present additionally refresh the
 * firmware active-port bitmask. Otherwise fall back to a queue-affinity
 * MODIFY_LAG. Returns 0 or a negative/firmware error.
 */
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u8 active_ports;
	int ret;

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
		ret = mlx5_lag_port_sel_modify(ldev, ports);
		if (ret ||
		    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass))
			return ret;

		active_ports = lag_active_port_bits(ldev);

		return mlx5_cmd_modify_active_port(dev0, active_ports);
	}
	return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
447da6b0bb0SMaor Gottlieb
/* Recompute the tx affinity mapping from @tracker and, if any slot
 * changed, push the complete new map to firmware in a single modify call.
 * After a successful update the cached map matches the new one, so the
 * remaining loop iterations find no further mismatches.
 *
 * For active-backup bonds that are not RoCE LAGs, also refresh the
 * ingress drop rules on inactive ports.
 */
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int idx;
	int err;
	int i;
	int j;

	mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			/* Skip slots that already match the cached map */
			if (ports[idx] == ldev->v2p_map[idx])
				continue;
			err = _mlx5_modify_lag(ldev, ports);
			if (err) {
				mlx5_core_err(dev0,
					      "Failed to modify LAG (%d)\n",
					      err);
				return;
			}
			memcpy(ldev->v2p_map, ports, sizeof(ports));

			mlx5_lag_print_mapping(dev0, ldev, tracker,
					       ldev->mode_flags);
			break;
		}
	}

	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !(ldev->mode == MLX5_LAG_MODE_ROCE))
		mlx5_lag_drop_rule_setup(ldev, tracker);
}
484da6b0bb0SMaor Gottlieb
/* Pick the port selection mode for a RoCE LAG.
 * Without the port_select_flow_table cap only a 2-port queue-affinity
 * LAG is supported, so >2 ports yields -EINVAL. With the cap, enable
 * hash-based selection and, for >2 ports, spread each port over
 * MLX5_LAG_MAX_HASH_BUCKETS buckets.
 */
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
					   unsigned long *flags)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;

	if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
		if (ldev->ports > 2)
			return -EINVAL;
		return 0;
	}

	if (ldev->ports > 2)
		ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;

	set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);

	return 0;
}
503cdf611d1SMark Bloch
/* Pick the port selection mode for an offloads (switchdev) LAG.
 * MPESW never uses hash-based selection. Otherwise enable it only when
 * the device has the port_select_flow_table cap and the bond hashes tx
 * traffic; >2 ports additionally get MLX5_LAG_MAX_HASH_BUCKETS buckets.
 */
static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
						struct lag_tracker *tracker,
						enum mlx5_lag_mode mode,
						unsigned long *flags)
{
	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];

	if (mode == MLX5_LAG_MODE_MPESW)
		return;

	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
		if (ldev->ports > 2)
			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
	}
}
521cdf611d1SMark Bloch
/* Compute the MLX5_LAG_MODE_FLAG_* bits for a new LAG.
 * Shared-FDB implies both the shared-FDB and native-FDB-selection bits;
 * MPESW also forces native FDB selection. RoCE LAGs then delegate to the
 * RoCE helper (which may fail), all other modes to the offloads helper.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
			      struct lag_tracker *tracker, bool shared_fdb,
			      unsigned long *flags)
{
	*flags = 0;

	if (shared_fdb) {
		set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
	}

	if (mode == MLX5_LAG_MODE_MPESW)
		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);

	if (mode == MLX5_LAG_MODE_ROCE)
		return mlx5_lag_set_port_sel_mode_roce(ldev, flags);

	mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
	return 0;
}
543da6b0bb0SMaor Gottlieb
mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode,unsigned long flags)5441afbd1e2SLiu, Changcheng char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
545da6b0bb0SMaor Gottlieb {
5461afbd1e2SLiu, Changcheng int port_sel_mode = get_port_sel_mode(mode, flags);
54794db3317SEli Cohen
54894db3317SEli Cohen switch (port_sel_mode) {
54994db3317SEli Cohen case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
55094db3317SEli Cohen case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: return "hash";
55194db3317SEli Cohen case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW: return "mpesw";
55294db3317SEli Cohen default: return "invalid";
55394db3317SEli Cohen }
5543d677735SMaor Gottlieb }
5553d677735SMaor Gottlieb
/* Chain every secondary port's eswitch into the primary's FDB
 * (shared-FDB mode). On failure, unwind the additions made so far and
 * return the error.
 *
 * NOTE(review): the unwind loop starts at the index whose add_one just
 * failed, so del_one is also called for that not-yet-added eswitch —
 * verify del_one tolerates this.
 */
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	int err;
	int i;

	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;

		err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
							       slave_esw, ldev->ports);
		if (err)
			goto err;
	}
	return 0;
err:
	for (; i > MLX5_LAG_P1; i--)
		mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
							 ldev->pf[i].dev->priv.eswitch);
	return err;
}
578014e4d48SShay Drory
/* Create the LAG in firmware and, in shared-FDB mode, chain secondary
 * eswitches into the primary FDB. If the shared-FDB setup fails, the
 * just-created firmware LAG is destroyed again; failure of that destroy
 * requires a driver restart. @tracker may be NULL (no mapping printed).
 * Returns 0 on success or an error code.
 */
static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   enum mlx5_lag_mode mode,
			   unsigned long flags)
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;

	if (tracker)
		mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
	mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
		       shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
	if (err) {
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
		return err;
	}

	if (shared_fdb) {
		err = mlx5_lag_create_single_fdb(ldev);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
			mlx5_core_info(dev0, "Operation mode is single FDB\n");
	}

	if (err) {
		/* Roll back the firmware LAG created above */
		MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
		if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
	}

	return err;
}
6193d677735SMaor Gottlieb
/* Activate LAG in the requested mode:
 *  1. resolve the mode flags,
 *  2. (non-MPESW only) compute the tx-affinity port map and, for hash-based
 *     port selection, create the port-selection steering tables,
 *  3. create the LAG in firmware,
 *  4. for active-backup bonds (non-RoCE), install the drop rules,
 *  5. commit mode/mode_flags into ldev only once everything succeeded.
 *
 * On mlx5_create_lag() failure the port-selection tables created in step 2
 * are destroyed before returning, so no partial state is left behind.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb)
{
	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	unsigned long flags = 0;
	int err;

	err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
	if (err)
		return err;

	if (mode != MLX5_LAG_MODE_MPESW) {
		mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
			err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
						       ldev->v2p_map);
			if (err) {
				mlx5_core_err(dev0,
					      "Failed to create LAG port selection(%d)\n",
					      err);
				return err;
			}
		}
	}

	err = mlx5_create_lag(ldev, tracker, mode, flags);
	if (err) {
		/* undo step 2 before reporting the failure */
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
			mlx5_lag_port_sel_destroy(ldev);
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !roce_lag)
		mlx5_lag_drop_rule_setup(ldev, tracker);

	/* only publish the new mode after activation fully succeeded */
	ldev->mode = mode;
	ldev->mode_flags = flags;
	return 0;
}
6703d677735SMaor Gottlieb
/* Tear LAG down: reset the software mode state first, detach the slave
 * eswitches from the master FDB (shared-FDB mode), destroy the LAG in
 * firmware, and finally clean up port-selection tables and drop rules.
 *
 * ldev->mode/mode_flags are cleared up front; the flags needed for the
 * teardown itself are kept in the local 'flags' copy.
 *
 * Return: 0 on success, negative errno if the firmware destroy failed
 * (in which case a driver restart is required to recover).
 */
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	unsigned long flags = ldev->mode_flags;
	int err;
	int i;

	ldev->mode = MLX5_LAG_MODE_NONE;
	ldev->mode_flags = 0;
	mlx5_lag_mp_reset(ldev);

	/* detach every secondary port's eswitch from the master FDB */
	if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
		for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
			mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
								 ldev->pf[i].dev->priv.eswitch);
		clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	}

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
		return err;
	}

	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
		mlx5_lag_port_sel_destroy(ldev);
		/* hash-based selection may have used multiple buckets */
		ldev->buckets = 1;
	}
	if (mlx5_lag_has_drop_rule(ldev))
		mlx5_lag_drop_rule_cleanup(ldev);

	return 0;
}
7153d677735SMaor Gottlieb
#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
/* Check whether LAG can be formed over this lag device's ports:
 *  - every port slot must be populated;
 *  - with eswitch support: a port with active VFs must be in switchdev
 *    mode, all ports must share the same eswitch mode, and offloads mode
 *    is limited to MLX5_LAG_OFFLOADS_SUPPORTED_PORTS ports;
 *  - without eswitch support: SR-IOV must be disabled on every port.
 *
 * Return: true if LAG may be activated.
 */
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_core_dev *dev;
	u8 mode;
#endif
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (!ldev->pf[i].dev)
			return false;

#ifdef CONFIG_MLX5_ESWITCH
	for (i = 0; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
			return false;
	}

	/* all ports must run the same eswitch mode as port 1 */
	dev = ldev->pf[MLX5_LAG_P1].dev;
	mode = mlx5_eswitch_mode(dev);
	for (i = 0; i < ldev->ports; i++)
		if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
			return false;

	if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
		return false;
#else
	for (i = 0; i < ldev->ports; i++)
		if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
			return false;
#endif
	return true;
}
7513d677735SMaor Gottlieb
mlx5_lag_add_devices(struct mlx5_lag * ldev)75227f9e0ccSMark Bloch void mlx5_lag_add_devices(struct mlx5_lag *ldev)
7533d677735SMaor Gottlieb {
7543d677735SMaor Gottlieb int i;
7553d677735SMaor Gottlieb
7567e978e77SMark Bloch for (i = 0; i < ldev->ports; i++) {
7573d677735SMaor Gottlieb if (!ldev->pf[i].dev)
7583d677735SMaor Gottlieb continue;
7593d677735SMaor Gottlieb
7603d677735SMaor Gottlieb if (ldev->pf[i].dev->priv.flags &
7613d677735SMaor Gottlieb MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
7623d677735SMaor Gottlieb continue;
7633d677735SMaor Gottlieb
7643d677735SMaor Gottlieb ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
7653d677735SMaor Gottlieb mlx5_rescan_drivers_locked(ldev->pf[i].dev);
7663d677735SMaor Gottlieb }
7673d677735SMaor Gottlieb }
7683d677735SMaor Gottlieb
mlx5_lag_remove_devices(struct mlx5_lag * ldev)76927f9e0ccSMark Bloch void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
7703d677735SMaor Gottlieb {
7713d677735SMaor Gottlieb int i;
7723d677735SMaor Gottlieb
7737e978e77SMark Bloch for (i = 0; i < ldev->ports; i++) {
7743d677735SMaor Gottlieb if (!ldev->pf[i].dev)
7753d677735SMaor Gottlieb continue;
7763d677735SMaor Gottlieb
7773d677735SMaor Gottlieb if (ldev->pf[i].dev->priv.flags &
7783d677735SMaor Gottlieb MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
7793d677735SMaor Gottlieb continue;
7803d677735SMaor Gottlieb
7813d677735SMaor Gottlieb ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
7823d677735SMaor Gottlieb mlx5_rescan_drivers_locked(ldev->pf[i].dev);
7833d677735SMaor Gottlieb }
7843d677735SMaor Gottlieb }
7853d677735SMaor Gottlieb
/* Disable an active LAG.  Before deactivating: shared-FDB mode removes the
 * aux devices, RoCE mode hides port 1's IB aux device and disables RoCE on
 * the secondary ports.  After a successful deactivate the aux devices are
 * restored and, for shared FDB, the eswitch representors are reloaded.
 */
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	bool roce_lag;
	int err;
	int i;

	roce_lag = __mlx5_lag_is_roce(ldev);

	if (shared_fdb) {
		mlx5_lag_remove_devices(ldev);
	} else if (roce_lag) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
		}
		/* RoCE was only enabled on secondary ports for the LAG */
		for (i = 1; i < ldev->ports; i++)
			mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
	}

	err = mlx5_deactivate_lag(ldev);
	if (err)
		return;

	if (shared_fdb || roce_lag)
		mlx5_lag_add_devices(ldev);

	if (shared_fdb)
		for (i = 0; i < ldev->ports; i++)
			if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
				mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
}
8193d677735SMaor Gottlieb
/* Decide whether shared-FDB LAG can be used.  Every secondary port must be
 * in switchdev mode with vport match metadata, support native FDB selection
 * and a root flow table on another eswitch, and see the expected number of
 * peers.  The primary port additionally needs its devcom ready and shared
 * ingress ACL support.
 *
 * Return: true if all ports satisfy the shared-FDB requirements.
 */
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev;
	int i;

	/* check the secondary ports first; any miss disqualifies shared FDB */
	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		dev = ldev->pf[i].dev;
		if (is_mdev_switchdev_mode(dev) &&
		    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
		    MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
		    MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
		    mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
		    MLX5_CAP_GEN(dev, num_lag_ports) - 1)
			continue;
		return false;
	}

	/* the primary port has its own, slightly different, requirements */
	dev = ldev->pf[MLX5_LAG_P1].dev;
	if (is_mdev_switchdev_mode(dev) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
	    mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
	    MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
	    mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
		return true;

	return false;
}
8473d677735SMaor Gottlieb
mlx5_lag_is_roce_lag(struct mlx5_lag * ldev)8487e978e77SMark Bloch static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
8497e978e77SMark Bloch {
8507e978e77SMark Bloch bool roce_lag = true;
8517e978e77SMark Bloch int i;
8527e978e77SMark Bloch
8537e978e77SMark Bloch for (i = 0; i < ldev->ports; i++)
8547e978e77SMark Bloch roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
8557e978e77SMark Bloch
8567e978e77SMark Bloch #ifdef CONFIG_MLX5_ESWITCH
8577e978e77SMark Bloch for (i = 0; i < ldev->ports; i++)
858f019679eSChris Mi roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
8597e978e77SMark Bloch #endif
8607e978e77SMark Bloch
8617e978e77SMark Bloch return roce_lag;
8627e978e77SMark Bloch }
8637e978e77SMark Bloch
/* An already-active, non-MPESW LAG that should stay bonded is modified
 * in place rather than recreated.
 */
static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
{
	if (!do_bond || ldev->mode == MLX5_LAG_MODE_MPESW)
		return false;

	return __mlx5_lag_is_active(ldev);
}
86994db3317SEli Cohen
/* An active, non-MPESW LAG whose bond conditions no longer hold must be
 * disabled.
 */
static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
{
	if (do_bond || ldev->mode == MLX5_LAG_MODE_MPESW)
		return false;

	return __mlx5_lag_is_active(ldev);
}
87594db3317SEli Cohen
mlx5_do_bond(struct mlx5_lag * ldev)8763d677735SMaor Gottlieb static void mlx5_do_bond(struct mlx5_lag *ldev)
8773d677735SMaor Gottlieb {
8783d677735SMaor Gottlieb struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
879842c3b3dSLinus Torvalds struct lag_tracker tracker = { };
8803d677735SMaor Gottlieb bool do_bond, roce_lag;
8813d677735SMaor Gottlieb int err;
8827e978e77SMark Bloch int i;
8833d677735SMaor Gottlieb
8843d677735SMaor Gottlieb if (!mlx5_lag_is_ready(ldev)) {
8853d677735SMaor Gottlieb do_bond = false;
8863d677735SMaor Gottlieb } else {
887bdfa75adSDavid S. Miller /* VF LAG is in multipath mode, ignore bond change requests */
888bdfa75adSDavid S. Miller if (mlx5_lag_is_multipath(dev0))
889bdfa75adSDavid S. Miller return;
890bdfa75adSDavid S. Miller
8913d677735SMaor Gottlieb tracker = ldev->tracker;
8923d677735SMaor Gottlieb
8933d677735SMaor Gottlieb do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
8943d677735SMaor Gottlieb }
8953d677735SMaor Gottlieb
8963d677735SMaor Gottlieb if (do_bond && !__mlx5_lag_is_active(ldev)) {
8973d677735SMaor Gottlieb bool shared_fdb = mlx5_shared_fdb_supported(ldev);
8983d677735SMaor Gottlieb
8997e978e77SMark Bloch roce_lag = mlx5_lag_is_roce_lag(ldev);
9003d677735SMaor Gottlieb
9013d677735SMaor Gottlieb if (shared_fdb || roce_lag)
9023d677735SMaor Gottlieb mlx5_lag_remove_devices(ldev);
9033d677735SMaor Gottlieb
9043d677735SMaor Gottlieb err = mlx5_activate_lag(ldev, &tracker,
905ef9a3a4aSEli Cohen roce_lag ? MLX5_LAG_MODE_ROCE :
906ef9a3a4aSEli Cohen MLX5_LAG_MODE_SRIOV,
9073d677735SMaor Gottlieb shared_fdb);
9083d677735SMaor Gottlieb if (err) {
9093d677735SMaor Gottlieb if (shared_fdb || roce_lag)
9103d677735SMaor Gottlieb mlx5_lag_add_devices(ldev);
9113d677735SMaor Gottlieb
9123d677735SMaor Gottlieb return;
9133d677735SMaor Gottlieb } else if (roce_lag) {
9143d677735SMaor Gottlieb dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
9153d677735SMaor Gottlieb mlx5_rescan_drivers_locked(dev0);
9167e978e77SMark Bloch for (i = 1; i < ldev->ports; i++)
9177e978e77SMark Bloch mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
9183d677735SMaor Gottlieb } else if (shared_fdb) {
91986a12124SShay Drory int i;
92086a12124SShay Drory
9213d677735SMaor Gottlieb dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
9223d677735SMaor Gottlieb mlx5_rescan_drivers_locked(dev0);
9233d677735SMaor Gottlieb
92486a12124SShay Drory for (i = 0; i < ldev->ports; i++) {
92586a12124SShay Drory err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
92686a12124SShay Drory if (err)
92786a12124SShay Drory break;
92886a12124SShay Drory }
9293d677735SMaor Gottlieb
9303d677735SMaor Gottlieb if (err) {
9313d677735SMaor Gottlieb dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
9323d677735SMaor Gottlieb mlx5_rescan_drivers_locked(dev0);
9333d677735SMaor Gottlieb mlx5_deactivate_lag(ldev);
9343d677735SMaor Gottlieb mlx5_lag_add_devices(ldev);
93586a12124SShay Drory for (i = 0; i < ldev->ports; i++)
93686a12124SShay Drory mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
9373d677735SMaor Gottlieb mlx5_core_err(dev0, "Failed to enable lag\n");
9383d677735SMaor Gottlieb return;
9393d677735SMaor Gottlieb }
9403d677735SMaor Gottlieb }
94194db3317SEli Cohen } else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
9423d677735SMaor Gottlieb mlx5_modify_lag(ldev, &tracker);
94394db3317SEli Cohen } else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
9443d677735SMaor Gottlieb mlx5_disable_lag(ldev);
9453d677735SMaor Gottlieb }
9463d677735SMaor Gottlieb }
9473d677735SMaor Gottlieb
/* (Re)schedule the bond work on the LAG workqueue after @delay jiffies. */
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}
9523d677735SMaor Gottlieb
/* Work item wrapper around mlx5_do_bond().  Takes the device-list lock and
 * ldev->lock; if either the list lock is contended or a mode change is in
 * progress, the work is rescheduled one second (HZ) later instead of
 * blocking.
 */
static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		/* couldn't get the list lock - retry later */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		mlx5_dev_list_unlock();
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mutex_unlock(&ldev->lock);
	mlx5_dev_list_unlock();
}
9783d677735SMaor Gottlieb
/* Handle NETDEV_CHANGEUPPER for a bond master: walk the bond's slaves under
 * RCU, record which of our netdevs are enslaved (bit per port index) and
 * whether any is inactive, then update tracker->is_bonded accordingly.
 *
 * Return: 1 if the tracker's bonded state changed (caller queues the bond
 * work), 0 otherwise.
 */
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded, is_in_lag, mode_supported;
	bool has_inactive = 0;
	struct slave *slave;
	u8 bond_status = 0;
	int num_slaves = 0;
	int changed = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	/* upper_info is only valid on link events */
	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx >= 0) {
			slave = bond_slave_get_rcu(ndev_tmp);
			if (slave)
				has_inactive |= bond_is_slave_inactive(slave);
			bond_status |= (1 << idx);
		}

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & GENMASK(ldev->ports - 1, 0)))
		return 0;

	if (lag_upper_info) {
		tracker->tx_type = lag_upper_info->tx_type;
		tracker->hash_type = lag_upper_info->hash_type;
	}

	tracker->has_inactive = has_inactive;
	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 */
	is_in_lag = num_slaves == ldev->ports &&
		    bond_status == GENMASK(ldev->ports - 1, 0);

	/* Lag mode must be activebackup or hash. */
	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

	is_bonded = is_in_lag && mode_supported;
	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		changed = 1;
	}

	if (!is_in_lag)
		return changed;

	/* bonded but offload can't be enabled - tell the user why */
	if (!mlx5_lag_is_ready(ldev))
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
	else if (!mode_supported)
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, TX type isn't supported");

	return changed;
}
10573d677735SMaor Gottlieb
mlx5_handle_changelowerstate_event(struct mlx5_lag * ldev,struct lag_tracker * tracker,struct net_device * ndev,struct netdev_notifier_changelowerstate_info * info)10583d677735SMaor Gottlieb static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
10593d677735SMaor Gottlieb struct lag_tracker *tracker,
10603d677735SMaor Gottlieb struct net_device *ndev,
10613d677735SMaor Gottlieb struct netdev_notifier_changelowerstate_info *info)
10623d677735SMaor Gottlieb {
10633d677735SMaor Gottlieb struct netdev_lag_lower_state_info *lag_lower_info;
10643d677735SMaor Gottlieb int idx;
10653d677735SMaor Gottlieb
10663d677735SMaor Gottlieb if (!netif_is_lag_port(ndev))
10673d677735SMaor Gottlieb return 0;
10683d677735SMaor Gottlieb
10693d677735SMaor Gottlieb idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
10703d677735SMaor Gottlieb if (idx < 0)
10713d677735SMaor Gottlieb return 0;
10723d677735SMaor Gottlieb
10733d677735SMaor Gottlieb /* This information is used to determine virtual to physical
10743d677735SMaor Gottlieb * port mapping.
10753d677735SMaor Gottlieb */
10763d677735SMaor Gottlieb lag_lower_info = info->lower_state_info;
10773d677735SMaor Gottlieb if (!lag_lower_info)
10783d677735SMaor Gottlieb return 0;
10793d677735SMaor Gottlieb
10803d677735SMaor Gottlieb tracker->netdev_state[idx] = *lag_lower_info;
10813d677735SMaor Gottlieb
10823d677735SMaor Gottlieb return 1;
10833d677735SMaor Gottlieb }
10843d677735SMaor Gottlieb
/* Handle NETDEV_CHANGEINFODATA for a bond master: re-scan the bond's slaves
 * under RCU and recompute whether any of our enslaved ports is inactive.
 *
 * Return: 1 if tracker->has_inactive changed, 0 otherwise.
 */
static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
					    struct lag_tracker *tracker,
					    struct net_device *ndev)
{
	struct net_device *ndev_tmp;
	struct slave *slave;
	bool has_inactive = 0;
	int idx;

	if (!netif_is_lag_master(ndev))
		return 0;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx < 0)
			continue;

		slave = bond_slave_get_rcu(ndev_tmp);
		if (slave)
			has_inactive |= bond_is_slave_inactive(slave);
	}
	rcu_read_unlock();

	/* nothing changed - no need to queue the bond work */
	if (tracker->has_inactive == has_inactive)
		return 0;

	tracker->has_inactive = has_inactive;

	return 1;
}
111654493a08SMark Bloch
/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	/* only these three event types can affect the bond state */
	if (event != NETDEV_CHANGEUPPER &&
	    event != NETDEV_CHANGELOWERSTATE &&
	    event != NETDEV_CHANGEINFODATA)
		return NOTIFY_DONE;

	ldev = container_of(this, struct mlx5_lag, nb);

	/* work on a local copy; commit it back after the handlers ran */
	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	case NETDEV_CHANGEINFODATA:
		changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
		break;
	}

	ldev->tracker = tracker;

	/* let the bond work re-evaluate LAG state immediately */
	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}
11553d677735SMaor Gottlieb
/* Attach @netdev to its port slot (derived from the device index) and reset
 * that port's tracked link state.  Protected by lag_lock since the netdev
 * pointers are also read from notifier/query paths.
 */
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
				 struct mlx5_core_dev *dev,
				 struct net_device *netdev)
{
	unsigned int fn = mlx5_get_dev_index(dev);
	unsigned long flags;

	if (fn >= ldev->ports)
		return;

	spin_lock_irqsave(&lag_lock, flags);
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;
	spin_unlock_irqrestore(&lag_lock, flags);
}
11723d677735SMaor Gottlieb
mlx5_ldev_remove_netdev(struct mlx5_lag * ldev,struct net_device * netdev)11733d677735SMaor Gottlieb static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
11743d677735SMaor Gottlieb struct net_device *netdev)
11753d677735SMaor Gottlieb {
11768e93f294SVlad Buslov unsigned long flags;
11773d677735SMaor Gottlieb int i;
11783d677735SMaor Gottlieb
11798e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
11807e978e77SMark Bloch for (i = 0; i < ldev->ports; i++) {
11813d677735SMaor Gottlieb if (ldev->pf[i].netdev == netdev) {
11823d677735SMaor Gottlieb ldev->pf[i].netdev = NULL;
11833d677735SMaor Gottlieb break;
11843d677735SMaor Gottlieb }
11853d677735SMaor Gottlieb }
11868e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
11873d677735SMaor Gottlieb }
11883d677735SMaor Gottlieb
mlx5_ldev_add_mdev(struct mlx5_lag * ldev,struct mlx5_core_dev * dev)11893d677735SMaor Gottlieb static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
11903d677735SMaor Gottlieb struct mlx5_core_dev *dev)
11913d677735SMaor Gottlieb {
11923d677735SMaor Gottlieb unsigned int fn = mlx5_get_dev_index(dev);
11933d677735SMaor Gottlieb
11947e978e77SMark Bloch if (fn >= ldev->ports)
11953d677735SMaor Gottlieb return;
11963d677735SMaor Gottlieb
11973d677735SMaor Gottlieb ldev->pf[fn].dev = dev;
11983d677735SMaor Gottlieb dev->priv.lag = ldev;
11993d677735SMaor Gottlieb }
12003d677735SMaor Gottlieb
mlx5_ldev_remove_mdev(struct mlx5_lag * ldev,struct mlx5_core_dev * dev)12013d677735SMaor Gottlieb static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
12023d677735SMaor Gottlieb struct mlx5_core_dev *dev)
12033d677735SMaor Gottlieb {
12043d677735SMaor Gottlieb int i;
12053d677735SMaor Gottlieb
12067e978e77SMark Bloch for (i = 0; i < ldev->ports; i++)
12073d677735SMaor Gottlieb if (ldev->pf[i].dev == dev)
12083d677735SMaor Gottlieb break;
12093d677735SMaor Gottlieb
12107e978e77SMark Bloch if (i == ldev->ports)
12113d677735SMaor Gottlieb return;
12123d677735SMaor Gottlieb
12133d677735SMaor Gottlieb ldev->pf[i].dev = NULL;
12143d677735SMaor Gottlieb dev->priv.lag = NULL;
12153d677735SMaor Gottlieb }
12163d677735SMaor Gottlieb
/* Must be called with intf_mutex held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	/* join an existing lag context from a sibling physical device,
	 * if one exists
	 */
	tmp_dev = mlx5_get_next_phys_dev_lag(dev);
	if (tmp_dev)
		ldev = mlx5_lag_dev(tmp_dev);

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc(dev);
		if (!ldev) {
			/* allocation failure is not fatal for the caller;
			 * the device simply runs without LAG support
			 */
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return 0;
		}
		/* freshly allocated ldev: no lock/refcount dance needed */
		mlx5_ldev_add_mdev(ldev, dev);
		return 0;
	}

	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		/* caller retries once the mode change completes */
		return -EAGAIN;
	}
	mlx5_ldev_get(ldev);
	mlx5_ldev_add_mdev(ldev, dev);
	mutex_unlock(&ldev->lock);

	return 0;
}
12483d677735SMaor Gottlieb
/* Unlink @dev from its lag context on device teardown and drop the
 * context reference taken at add time.
 */
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	/* mdev is being removed, might as well remove debugfs
	 * as early as possible.
	 */
	mlx5_ldev_remove_debugfs(dev->priv.dbg.lag_debugfs);
recheck:
	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		/* Poll until the in-flight mode change completes; the lock
		 * must be dropped while sleeping so the change can finish.
		 */
		mutex_unlock(&ldev->lock);
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_remove_mdev(ldev, dev);
	mutex_unlock(&ldev->lock);
	/* Drop the reference outside ldev->lock; this may free ldev. */
	mlx5_ldev_put(ldev);
}
12723d677735SMaor Gottlieb
/* Register @dev with (or create) the lag context for its device group,
 * then expose its lag debugfs entries. Retries while a lag mode change
 * is in progress (-EAGAIN from __mlx5_lag_dev_add_mdev).
 */
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
	int err;

	if (!mlx5_lag_is_supported(dev))
		return;

recheck:
	/* The global dev-list lock serializes lag context creation across
	 * devices; it must be dropped between retries.
	 */
	mlx5_dev_list_lock();
	err = __mlx5_lag_dev_add_mdev(dev);
	mlx5_dev_list_unlock();

	if (err) {
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_add_debugfs(dev);
}
12913d677735SMaor Gottlieb
/* Detach @netdev from the lag context of @dev. Since one port's netdev
 * is gone, the NDEVS_READY state is cleared; if the lag was active,
 * schedule the bond work to re-evaluate (and likely tear down) the bond.
 */
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
			    struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	bool lag_is_active;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	mlx5_ldev_remove_netdev(ldev, netdev);
	clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);

	/* Sample the active state under the lock; the work is queued
	 * after the lock is dropped.
	 */
	lag_is_active = __mlx5_lag_is_active(ldev);
	mutex_unlock(&ldev->lock);

	if (lag_is_active)
		mlx5_queue_bond_work(ldev, 0);
}
13123d677735SMaor Gottlieb
mlx5_lag_add_netdev(struct mlx5_core_dev * dev,struct net_device * netdev)13133d677735SMaor Gottlieb void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
13143d677735SMaor Gottlieb struct net_device *netdev)
13153d677735SMaor Gottlieb {
13163d677735SMaor Gottlieb struct mlx5_lag *ldev;
13173d677735SMaor Gottlieb int i;
13183d677735SMaor Gottlieb
13193d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
13203d677735SMaor Gottlieb if (!ldev)
13213d677735SMaor Gottlieb return;
13223d677735SMaor Gottlieb
1323ec2fa47dSMark Bloch mutex_lock(&ldev->lock);
13243d677735SMaor Gottlieb mlx5_ldev_add_netdev(ldev, dev, netdev);
13253d677735SMaor Gottlieb
13267e978e77SMark Bloch for (i = 0; i < ldev->ports; i++)
1327a6e675a6SEli Cohen if (!ldev->pf[i].netdev)
13283d677735SMaor Gottlieb break;
13293d677735SMaor Gottlieb
13307e978e77SMark Bloch if (i >= ldev->ports)
1331ef9a3a4aSEli Cohen set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
1332ec2fa47dSMark Bloch mutex_unlock(&ldev->lock);
13333d677735SMaor Gottlieb mlx5_queue_bond_work(ldev, 0);
13343d677735SMaor Gottlieb }
13353d677735SMaor Gottlieb
mlx5_lag_is_roce(struct mlx5_core_dev * dev)13363d677735SMaor Gottlieb bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
13373d677735SMaor Gottlieb {
13383d677735SMaor Gottlieb struct mlx5_lag *ldev;
13398e93f294SVlad Buslov unsigned long flags;
13403d677735SMaor Gottlieb bool res;
13413d677735SMaor Gottlieb
13428e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
13433d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
13443d677735SMaor Gottlieb res = ldev && __mlx5_lag_is_roce(ldev);
13458e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
13463d677735SMaor Gottlieb
13473d677735SMaor Gottlieb return res;
13483d677735SMaor Gottlieb }
13493d677735SMaor Gottlieb EXPORT_SYMBOL(mlx5_lag_is_roce);
13503d677735SMaor Gottlieb
mlx5_lag_is_active(struct mlx5_core_dev * dev)13513d677735SMaor Gottlieb bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
13523d677735SMaor Gottlieb {
13533d677735SMaor Gottlieb struct mlx5_lag *ldev;
13548e93f294SVlad Buslov unsigned long flags;
13553d677735SMaor Gottlieb bool res;
13563d677735SMaor Gottlieb
13578e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
13583d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
13593d677735SMaor Gottlieb res = ldev && __mlx5_lag_is_active(ldev);
13608e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
13613d677735SMaor Gottlieb
13623d677735SMaor Gottlieb return res;
13633d677735SMaor Gottlieb }
13643d677735SMaor Gottlieb EXPORT_SYMBOL(mlx5_lag_is_active);
13653d677735SMaor Gottlieb
mlx5_lag_mode_is_hash(struct mlx5_core_dev * dev)1366a83bb5dfSLiu, Changcheng bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
1367a83bb5dfSLiu, Changcheng {
1368a83bb5dfSLiu, Changcheng struct mlx5_lag *ldev;
1369a83bb5dfSLiu, Changcheng unsigned long flags;
1370a83bb5dfSLiu, Changcheng bool res = 0;
1371a83bb5dfSLiu, Changcheng
1372a83bb5dfSLiu, Changcheng spin_lock_irqsave(&lag_lock, flags);
1373a83bb5dfSLiu, Changcheng ldev = mlx5_lag_dev(dev);
1374a83bb5dfSLiu, Changcheng if (ldev)
1375a83bb5dfSLiu, Changcheng res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
1376a83bb5dfSLiu, Changcheng spin_unlock_irqrestore(&lag_lock, flags);
1377a83bb5dfSLiu, Changcheng
1378a83bb5dfSLiu, Changcheng return res;
1379a83bb5dfSLiu, Changcheng }
1380a83bb5dfSLiu, Changcheng EXPORT_SYMBOL(mlx5_lag_mode_is_hash);
1381a83bb5dfSLiu, Changcheng
mlx5_lag_is_master(struct mlx5_core_dev * dev)13823d677735SMaor Gottlieb bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
13833d677735SMaor Gottlieb {
13843d677735SMaor Gottlieb struct mlx5_lag *ldev;
13858e93f294SVlad Buslov unsigned long flags;
13863d677735SMaor Gottlieb bool res;
13873d677735SMaor Gottlieb
13888e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
13893d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
13903d677735SMaor Gottlieb res = ldev && __mlx5_lag_is_active(ldev) &&
13913d677735SMaor Gottlieb dev == ldev->pf[MLX5_LAG_P1].dev;
13928e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
13933d677735SMaor Gottlieb
13943d677735SMaor Gottlieb return res;
13953d677735SMaor Gottlieb }
13963d677735SMaor Gottlieb EXPORT_SYMBOL(mlx5_lag_is_master);
13973d677735SMaor Gottlieb
mlx5_lag_is_sriov(struct mlx5_core_dev * dev)13983d677735SMaor Gottlieb bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
13993d677735SMaor Gottlieb {
14003d677735SMaor Gottlieb struct mlx5_lag *ldev;
14018e93f294SVlad Buslov unsigned long flags;
14023d677735SMaor Gottlieb bool res;
14033d677735SMaor Gottlieb
14048e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
14053d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
14063d677735SMaor Gottlieb res = ldev && __mlx5_lag_is_sriov(ldev);
14078e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
14083d677735SMaor Gottlieb
14093d677735SMaor Gottlieb return res;
14103d677735SMaor Gottlieb }
14113d677735SMaor Gottlieb EXPORT_SYMBOL(mlx5_lag_is_sriov);
14123d677735SMaor Gottlieb
mlx5_lag_is_shared_fdb(struct mlx5_core_dev * dev)14133d677735SMaor Gottlieb bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
14143d677735SMaor Gottlieb {
14153d677735SMaor Gottlieb struct mlx5_lag *ldev;
14168e93f294SVlad Buslov unsigned long flags;
14173d677735SMaor Gottlieb bool res;
14183d677735SMaor Gottlieb
14198e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
14203d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
14216a80313dSMark Bloch res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
14228e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
14233d677735SMaor Gottlieb
14243d677735SMaor Gottlieb return res;
14253d677735SMaor Gottlieb }
14263d677735SMaor Gottlieb EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
14273d677735SMaor Gottlieb
/* Block lag mode changes for @dev's lag context and tear down an active
 * lag if there is one. Balanced by mlx5_lag_enable_change().
 */
void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	/* Lock order: global dev-list lock first, then ldev->lock. */
	mlx5_dev_list_lock();
	mutex_lock(&ldev->lock);

	/* Counter (not a flag): nested disable/enable pairs are allowed. */
	ldev->mode_changes_in_progress++;
	if (__mlx5_lag_is_active(ldev))
		mlx5_disable_lag(ldev);

	mutex_unlock(&ldev->lock);
	mlx5_dev_list_unlock();
}
14463d677735SMaor Gottlieb
/* Re-allow lag mode changes (undo one mlx5_lag_disable_change()) and
 * kick the bond work to re-evaluate the lag state.
 */
void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	ldev->mode_changes_in_progress--;
	mutex_unlock(&ldev->lock);
	mlx5_queue_bond_work(ldev, 0);
}
14603d677735SMaor Gottlieb
/* Return the netdev currently carrying traffic for @dev's RoCE lag,
 * with a reference held (caller must dev_put()), or NULL if @dev is
 * not in an active RoCE lag.
 */
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		/* Pick a tx-enabled port (the loop keeps the last match);
		 * if none is tx-enabled, fall back to the last port.
		 */
		for (i = 0; i < ldev->ports; i++)
			if (ldev->tracker.netdev_state[i].tx_enabled)
				ndev = ldev->pf[i].netdev;
		if (!ndev)
			ndev = ldev->pf[ldev->ports - 1].netdev;
	} else {
		ndev = ldev->pf[MLX5_LAG_P1].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
14923d677735SMaor Gottlieb
mlx5_lag_get_slave_port(struct mlx5_core_dev * dev,struct net_device * slave)14933d677735SMaor Gottlieb u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
14943d677735SMaor Gottlieb struct net_device *slave)
14953d677735SMaor Gottlieb {
14963d677735SMaor Gottlieb struct mlx5_lag *ldev;
14978e93f294SVlad Buslov unsigned long flags;
14983d677735SMaor Gottlieb u8 port = 0;
14997e978e77SMark Bloch int i;
15003d677735SMaor Gottlieb
15018e93f294SVlad Buslov spin_lock_irqsave(&lag_lock, flags);
15023d677735SMaor Gottlieb ldev = mlx5_lag_dev(dev);
15033d677735SMaor Gottlieb if (!(ldev && __mlx5_lag_is_roce(ldev)))
15043d677735SMaor Gottlieb goto unlock;
15053d677735SMaor Gottlieb
15067e978e77SMark Bloch for (i = 0; i < ldev->ports; i++) {
15077e978e77SMark Bloch if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
15087e978e77SMark Bloch port = i;
15097e978e77SMark Bloch break;
15107e978e77SMark Bloch }
15117e978e77SMark Bloch }
15123d677735SMaor Gottlieb
1513352899f3SMark Bloch port = ldev->v2p_map[port * ldev->buckets];
15143d677735SMaor Gottlieb
15153d677735SMaor Gottlieb unlock:
15168e93f294SVlad Buslov spin_unlock_irqrestore(&lag_lock, flags);
15173d677735SMaor Gottlieb return port;
15183d677735SMaor Gottlieb }
15193d677735SMaor Gottlieb EXPORT_SYMBOL(mlx5_lag_get_slave_port);
15203d677735SMaor Gottlieb
mlx5_lag_get_num_ports(struct mlx5_core_dev * dev)152134a30d76SMark Bloch u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
152234a30d76SMark Bloch {
15237e978e77SMark Bloch struct mlx5_lag *ldev;
15247e978e77SMark Bloch
15257e978e77SMark Bloch ldev = mlx5_lag_dev(dev);
15267e978e77SMark Bloch if (!ldev)
15277e978e77SMark Bloch return 0;
15287e978e77SMark Bloch
15297e978e77SMark Bloch return ldev->ports;
153034a30d76SMark Bloch }
153134a30d76SMark Bloch EXPORT_SYMBOL(mlx5_lag_get_num_ports);
153234a30d76SMark Bloch
/* Iterate over the peer devices of @dev within its lag.
 *
 * @i is an in/out cursor: start it at 0 and call repeatedly; each call
 * returns the next peer (skipping @dev itself) and advances *i past it.
 * Returns NULL when there are no more peers (or no lag context).
 */
struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
{
	struct mlx5_core_dev *peer_dev = NULL;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		goto unlock;

	/* Cursor already ran off the end on a previous call. */
	if (*i == ldev->ports)
		goto unlock;
	for (idx = *i; idx < ldev->ports; idx++)
		if (ldev->pf[idx].dev != dev)
			break;

	if (idx == ldev->ports) {
		/* Only @dev itself remained; park the cursor at the end. */
		*i = idx;
		goto unlock;
	}
	*i = idx + 1;

	peer_dev = ldev->pf[idx].dev;

unlock:
	spin_unlock_irqrestore(&lag_lock, flags);
	return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_next_peer_mdev);
15643d677735SMaor Gottlieb
/* Query congestion statistics, summed across all ports of @dev's lag
 * (or just @dev when no lag is active).
 *
 * @values:       output array of @num_counters accumulated counters
 * @num_counters: number of counters requested
 * @offsets:      byte offset of each counter within the firmware
 *                query_cong_statistics output
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or a firmware
 * command failure from any port).
 */
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev **mdev;
	struct mlx5_lag *ldev;
	unsigned long flags;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL);
	if (!mdev) {
		ret = -ENOMEM;
		goto free_out;
	}

	memset(values, 0, sizeof(*values) * num_counters);

	/* Snapshot the member devices under the spinlock; the firmware
	 * commands below sleep and must run outside it.
	 */
	spin_lock_irqsave(&lag_lock, flags);
	ldev = mlx5_lag_dev(dev);
	if (ldev && __mlx5_lag_is_active(ldev)) {
		num_ports = ldev->ports;
		for (i = 0; i < ldev->ports; i++)
			mdev[i] = ldev->pf[i].dev;
	} else {
		/* No active lag: query only this device. */
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock_irqrestore(&lag_lock, flags);

	for (i = 0; i < num_ports; ++i) {
		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

		MLX5_SET(query_cong_statistics_in, in, opcode,
			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
					  out);
		if (ret)
			goto free_mdev;

		/* Accumulate each big-endian counter into the caller's
		 * array.
		 */
		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

free_mdev:
	kvfree(mdev);
free_out:
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
1623