1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/netdevice.h>
5 #include <net/nexthop.h>
6 #include "lag/lag.h"
7 #include "eswitch.h"
8 #include "esw/acl/ofld.h"
9 #include "lib/mlx5.h"
10 
11 static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
12 {
13 	struct mlx5_core_dev *dev;
14 	struct mlx5_eswitch *esw;
15 	u32 pf_metadata;
16 	int i;
17 
18 	for (i = 0; i < ldev->ports; i++) {
19 		dev = ldev->pf[i].dev;
20 		esw = dev->priv.eswitch;
21 		pf_metadata = ldev->lag_mpesw.pf_metadata[i];
22 		if (!pf_metadata)
23 			continue;
24 		mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);
25 		mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
26 					 (void *)0);
27 		mlx5_esw_match_metadata_free(esw, pf_metadata);
28 		ldev->lag_mpesw.pf_metadata[i] = 0;
29 	}
30 }
31 
32 static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
33 {
34 	struct mlx5_core_dev *dev;
35 	struct mlx5_eswitch *esw;
36 	u32 pf_metadata;
37 	int i, err;
38 
39 	for (i = 0; i < ldev->ports; i++) {
40 		dev = ldev->pf[i].dev;
41 		esw = dev->priv.eswitch;
42 		pf_metadata = mlx5_esw_match_metadata_alloc(esw);
43 		if (!pf_metadata) {
44 			err = -ENOSPC;
45 			goto err_metadata;
46 		}
47 
48 		ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
49 		err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,
50 								 pf_metadata);
51 		if (err)
52 			goto err_metadata;
53 	}
54 
55 	for (i = 0; i < ldev->ports; i++) {
56 		dev = ldev->pf[i].dev;
57 		mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
58 					 (void *)0);
59 	}
60 
61 	return 0;
62 
63 err_metadata:
64 	mlx5_mpesw_metadata_cleanup(ldev);
65 	return err;
66 }
67 
/* Switch the LAG into multiport E-Switch (MPESW) mode.
 *
 * Sequence: set per-port metadata, detach the aux devices, activate the
 * LAG in MPESW mode, re-enable the IB aux device on dev0 and reload the
 * eswitch representors of both ports.  Each failure point unwinds the
 * steps completed so far (note the unwind labels below overlap: the
 * rescan-failure path also deactivates the LAG before re-adding devices).
 *
 * Caller must hold ldev->lock (called from mlx5_mpesw_work()).
 *
 * Return: 0 on success, -EINVAL if a LAG mode is already active,
 * -EOPNOTSUPP if prerequisites are missing, or a step's error code.
 */
static int enable_mpesw(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	int err;

	/* MPESW can only be entered from no-LAG mode. */
	if (ldev->mode != MLX5_LAG_MODE_NONE)
		return -EINVAL;

	/* Prerequisites: switchdev offloads mode plus the HW capabilities
	 * needed for MPESW (port-selection flow table, LAG creation while
	 * not master), and the generic LAG preconditions.
	 */
	if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
	    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
	    !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
	    !mlx5_lag_check_prereq(ldev))
		return -EOPNOTSUPP;

	err = mlx5_mpesw_metadata_set(ldev);
	if (err)
		return err;

	/* Detach aux devices before changing the LAG mode. */
	mlx5_lag_remove_devices(ldev);

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
	if (err) {
		mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
		goto err_add_devices;
	}

	/* Allow the IB aux device on dev0 again and pick it up. */
	dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
	if (!err)
		err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
	if (err)
		goto err_rescan_drivers;

	return 0;

err_rescan_drivers:
	/* Re-disable the IB aux device and drop out of MPESW mode. */
	dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	mlx5_deactivate_lag(ldev);
err_add_devices:
	/* Restore the aux devices and reps removed above, then release
	 * the metadata.  Rep reload errors are intentionally ignored here
	 * since we are already on a failure path.
	 */
	mlx5_lag_add_devices(ldev);
	mlx5_eswitch_reload_reps(dev0->priv.eswitch);
	mlx5_eswitch_reload_reps(dev1->priv.eswitch);
	mlx5_mpesw_metadata_cleanup(ldev);
	return err;
}
116 
117 static void disable_mpesw(struct mlx5_lag *ldev)
118 {
119 	if (ldev->mode == MLX5_LAG_MODE_MPESW) {
120 		mlx5_mpesw_metadata_cleanup(ldev);
121 		mlx5_disable_lag(ldev);
122 	}
123 }
124 
125 static void mlx5_mpesw_work(struct work_struct *work)
126 {
127 	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
128 	struct mlx5_lag *ldev = mpesww->lag;
129 
130 	mlx5_dev_list_lock();
131 	mutex_lock(&ldev->lock);
132 	if (ldev->mode_changes_in_progress) {
133 		mpesww->result = -EAGAIN;
134 		goto unlock;
135 	}
136 
137 	if (mpesww->op == MLX5_MPESW_OP_ENABLE)
138 		mpesww->result = enable_mpesw(ldev);
139 	else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
140 		disable_mpesw(ldev);
141 unlock:
142 	mutex_unlock(&ldev->lock);
143 	mlx5_dev_list_unlock();
144 	complete(&mpesww->comp);
145 }
146 
147 static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
148 				     enum mpesw_op op)
149 {
150 	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
151 	struct mlx5_mpesw_work_st *work;
152 	int err = 0;
153 
154 	if (!ldev)
155 		return 0;
156 
157 	work = kzalloc(sizeof(*work), GFP_KERNEL);
158 	if (!work)
159 		return -ENOMEM;
160 
161 	INIT_WORK(&work->work, mlx5_mpesw_work);
162 	init_completion(&work->comp);
163 	work->op = op;
164 	work->lag = ldev;
165 
166 	if (!queue_work(ldev->wq, &work->work)) {
167 		mlx5_core_warn(dev, "failed to queue mpesw work\n");
168 		err = -EINVAL;
169 		goto out;
170 	}
171 	wait_for_completion(&work->comp);
172 	err = work->result;
173 out:
174 	kfree(work);
175 	return err;
176 }
177 
/* Request MPESW teardown via the LAG workqueue; blocks until done.
 * Any error from queuing is deliberately ignored.
 */
void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev)
{
	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}
182 
/* Request MPESW activation via the LAG workqueue; blocks until done.
 * Return: 0 on success or a negative errno from the queued operation.
 */
int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev)
{
	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
187 
188 int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
189 			     struct net_device *out_dev,
190 			     struct netlink_ext_ack *extack)
191 {
192 	struct mlx5_lag *ldev = mlx5_lag_dev(mdev);
193 
194 	if (!netif_is_bond_master(out_dev) || !ldev)
195 		return 0;
196 
197 	if (ldev->mode != MLX5_LAG_MODE_MPESW)
198 		return 0;
199 
200 	NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
201 	return -EOPNOTSUPP;
202 }
203 
204 bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
205 {
206 	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
207 
208 	return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
209 }
210 EXPORT_SYMBOL(mlx5_lag_is_mpesw);
211