1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
3 
4 #include <linux/netdevice.h>
5 #include <linux/if_bridge.h>
6 #include <net/netevent.h>
7 #include <net/switchdev.h>
8 #include "bridge.h"
9 #include "esw/bridge.h"
10 #include "en_rep.h"
11 
12 #define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000
13 
/* Deferred context for one switchdev FDB add/del notification. The atomic
 * notifier copies the event into this struct and queues it on the bridge
 * ordered workqueue for sleepable processing.
 */
struct mlx5_bridge_switchdev_fdb_work {
	struct work_struct work;	/* queued on br_offloads->wq */
	/* copy of the notifier payload; .addr points to a privately
	 * allocated ETH_ALEN buffer owned by this work item
	 */
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;		/* rep netdev, reference held via dev_hold() */
	bool add;			/* true = FDB add event, false = FDB del */
};
20 
21 static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
22 {
23 	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
24 								    struct mlx5_esw_bridge_offloads,
25 								    netdev_nb);
26 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
27 	struct netdev_notifier_changeupper_info *info = ptr;
28 	struct netlink_ext_ack *extack;
29 	struct mlx5e_rep_priv *rpriv;
30 	struct mlx5_eswitch *esw;
31 	struct mlx5_vport *vport;
32 	struct net_device *upper;
33 	struct mlx5e_priv *priv;
34 	u16 vport_num;
35 
36 	if (!mlx5e_eswitch_rep(dev))
37 		return 0;
38 
39 	upper = info->upper_dev;
40 	if (!netif_is_bridge_master(upper))
41 		return 0;
42 
43 	esw = br_offloads->esw;
44 	priv = netdev_priv(dev);
45 	if (esw != priv->mdev->priv.eswitch)
46 		return 0;
47 
48 	rpriv = priv->ppriv;
49 	vport_num = rpriv->rep->vport;
50 	vport = mlx5_eswitch_get_vport(esw, vport_num);
51 	if (IS_ERR(vport))
52 		return PTR_ERR(vport);
53 
54 	extack = netdev_notifier_info_to_extack(&info->info);
55 
56 	return info->linking ?
57 		mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) :
58 		mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack);
59 }
60 
61 static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
62 						unsigned long event, void *ptr)
63 {
64 	int err = 0;
65 
66 	switch (event) {
67 	case NETDEV_PRECHANGEUPPER:
68 		break;
69 
70 	case NETDEV_CHANGEUPPER:
71 		err = mlx5_esw_bridge_port_changeupper(nb, ptr);
72 		break;
73 	}
74 
75 	return notifier_from_errno(err);
76 }
77 
78 static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
79 					const void *ctx,
80 					const struct switchdev_obj *obj,
81 					struct netlink_ext_ack *extack)
82 {
83 	const struct switchdev_obj_port_vlan *vlan;
84 	struct mlx5e_rep_priv *rpriv;
85 	struct mlx5_eswitch *esw;
86 	struct mlx5_vport *vport;
87 	struct mlx5e_priv *priv;
88 	u16 vport_num;
89 	int err = 0;
90 
91 	priv = netdev_priv(dev);
92 	rpriv = priv->ppriv;
93 	vport_num = rpriv->rep->vport;
94 	esw = priv->mdev->priv.eswitch;
95 	vport = mlx5_eswitch_get_vport(esw, vport_num);
96 	if (IS_ERR(vport))
97 		return PTR_ERR(vport);
98 
99 	switch (obj->id) {
100 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
101 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
102 		err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack);
103 		break;
104 	default:
105 		return -EOPNOTSUPP;
106 	}
107 	return err;
108 }
109 
110 static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
111 					const void *ctx,
112 					const struct switchdev_obj *obj)
113 {
114 	const struct switchdev_obj_port_vlan *vlan;
115 	struct mlx5e_rep_priv *rpriv;
116 	struct mlx5_eswitch *esw;
117 	struct mlx5_vport *vport;
118 	struct mlx5e_priv *priv;
119 	u16 vport_num;
120 
121 	priv = netdev_priv(dev);
122 	rpriv = priv->ppriv;
123 	vport_num = rpriv->rep->vport;
124 	esw = priv->mdev->priv.eswitch;
125 	vport = mlx5_eswitch_get_vport(esw, vport_num);
126 	if (IS_ERR(vport))
127 		return PTR_ERR(vport);
128 
129 	switch (obj->id) {
130 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
131 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
132 		mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport);
133 		break;
134 	default:
135 		return -EOPNOTSUPP;
136 	}
137 	return 0;
138 }
139 
140 static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
141 					     const void *ctx,
142 					     const struct switchdev_attr *attr,
143 					     struct netlink_ext_ack *extack)
144 {
145 	struct mlx5e_rep_priv *rpriv;
146 	struct mlx5_eswitch *esw;
147 	struct mlx5_vport *vport;
148 	struct mlx5e_priv *priv;
149 	u16 vport_num;
150 	int err = 0;
151 
152 	priv = netdev_priv(dev);
153 	rpriv = priv->ppriv;
154 	vport_num = rpriv->rep->vport;
155 	esw = priv->mdev->priv.eswitch;
156 	vport = mlx5_eswitch_get_vport(esw, vport_num);
157 	if (IS_ERR(vport))
158 		return PTR_ERR(vport);
159 
160 	switch (attr->id) {
161 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
162 		if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
163 			NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
164 			err = -EINVAL;
165 		}
166 		break;
167 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
168 		break;
169 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
170 		err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport);
171 		break;
172 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
173 		err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport);
174 		break;
175 	default:
176 		err = -EOPNOTSUPP;
177 	}
178 
179 	return err;
180 }
181 
182 static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused,
183 					  unsigned long event, void *ptr)
184 {
185 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
186 	int err;
187 
188 	switch (event) {
189 	case SWITCHDEV_PORT_OBJ_ADD:
190 		err = switchdev_handle_port_obj_add(dev, ptr,
191 						    mlx5e_eswitch_rep,
192 						    mlx5_esw_bridge_port_obj_add);
193 		break;
194 	case SWITCHDEV_PORT_OBJ_DEL:
195 		err = switchdev_handle_port_obj_del(dev, ptr,
196 						    mlx5e_eswitch_rep,
197 						    mlx5_esw_bridge_port_obj_del);
198 		break;
199 	case SWITCHDEV_PORT_ATTR_SET:
200 		err = switchdev_handle_port_attr_set(dev, ptr,
201 						     mlx5e_eswitch_rep,
202 						     mlx5_esw_bridge_port_obj_attr_set);
203 		break;
204 	default:
205 		err = 0;
206 	}
207 
208 	return notifier_from_errno(err);
209 }
210 
211 static void
212 mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
213 {
214 	dev_put(fdb_work->dev);
215 	kfree(fdb_work->fdb_info.addr);
216 	kfree(fdb_work);
217 }
218 
/* Sleepable worker for a deferred FDB add/del event: resolves the rep's
 * vport under RTNL, programs or removes the offloaded FDB entry, then frees
 * the work context (netdev ref, MAC copy, work struct).
 */
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
	struct mlx5_bridge_switchdev_fdb_work *fdb_work =
		container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
	struct switchdev_notifier_fdb_info *fdb_info =
		&fdb_work->fdb_info;
	struct net_device *dev = fdb_work->dev;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	struct mlx5e_priv *priv;
	u16 vport_num;

	rtnl_lock();

	priv = netdev_priv(dev);
	rpriv = priv->ppriv;
	vport_num = rpriv->rep->vport;
	esw = priv->mdev->priv.eswitch;
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		/* Vport gone; still fall through to free the work context. */
		goto out;

	if (fdb_work->add)
		mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info);
	else
		mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info);

out:
	rtnl_unlock();
	mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}
251 
252 static struct mlx5_bridge_switchdev_fdb_work *
253 mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
254 					struct switchdev_notifier_fdb_info *fdb_info)
255 {
256 	struct mlx5_bridge_switchdev_fdb_work *work;
257 	u8 *addr;
258 
259 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
260 	if (!work)
261 		return ERR_PTR(-ENOMEM);
262 
263 	INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
264 	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
265 
266 	addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
267 	if (!addr) {
268 		kfree(work);
269 		return ERR_PTR(-ENOMEM);
270 	}
271 	ether_addr_copy(addr, fdb_info->addr);
272 	work->fdb_info.addr = addr;
273 
274 	dev_hold(dev);
275 	work->dev = dev;
276 	work->add = add;
277 	return work;
278 }
279 
/* Atomic switchdev notifier: filters events down to this eswitch's
 * representors and defers FDB add/del handling to the bridge workqueue
 * (FDB offload needs to sleep, the notifier may not).
 */
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlx5_bridge_switchdev_fdb_work *work;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *upper;
	struct mlx5e_priv *priv;

	if (!mlx5e_eswitch_rep(dev))
		return NOTIFY_DONE;
	priv = netdev_priv(dev);
	/* Rep belongs to another eswitch instance — not ours to handle. */
	if (priv->mdev->priv.eswitch != br_offloads->esw)
		return NOTIFY_DONE;

	/* Attribute sets can be handled inline via the switchdev helper. */
	if (event == SWITCHDEV_PORT_ATTR_SET) {
		int err = switchdev_handle_port_attr_set(dev, ptr,
							 mlx5e_eswitch_rep,
							 mlx5_esw_bridge_port_obj_attr_set);
		return notifier_from_errno(err);
	}

	/* FDB events only matter while the rep is enslaved to a bridge. */
	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);

		/* Snapshot the event (GFP_ATOMIC) and process it in
		 * sleepable context on the ordered bridge workqueue.
		 */
		work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
							       event == SWITCHDEV_FDB_ADD_TO_DEVICE,
							       fdb_info);
		if (IS_ERR(work)) {
			WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
				  PTR_ERR(work));
			return notifier_from_errno(PTR_ERR(work));
		}

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
335 
/* Periodic maintenance: run the eswitch bridge update under RTNL, then
 * re-arm itself on the bridge workqueue every
 * MLX5_ESW_BRIDGE_UPDATE_INTERVAL ms. Because it self-requeues, it must be
 * stopped with a synchronous cancel before its workqueue is destroyed.
 */
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
								    struct mlx5_esw_bridge_offloads,
								    update_work.work);

	rtnl_lock();
	mlx5_esw_bridge_update(br_offloads);
	rtnl_unlock();

	/* Re-arm for the next interval. */
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}
349 
350 void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
351 {
352 	struct mlx5_esw_bridge_offloads *br_offloads;
353 	struct mlx5_core_dev *mdev = priv->mdev;
354 	struct mlx5_eswitch *esw =
355 		mdev->priv.eswitch;
356 	int err;
357 
358 	rtnl_lock();
359 	br_offloads = mlx5_esw_bridge_init(esw);
360 	rtnl_unlock();
361 	if (IS_ERR(br_offloads)) {
362 		esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
363 		return;
364 	}
365 
366 	br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
367 	if (!br_offloads->wq) {
368 		esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
369 		goto err_alloc_wq;
370 	}
371 	INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
372 	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
373 			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
374 
375 	br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
376 	err = register_switchdev_notifier(&br_offloads->nb);
377 	if (err) {
378 		esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
379 		goto err_register_swdev;
380 	}
381 
382 	br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
383 	err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
384 	if (err) {
385 		esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
386 		goto err_register_swdev_blk;
387 	}
388 
389 	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
390 	err = register_netdevice_notifier(&br_offloads->netdev_nb);
391 	if (err) {
392 		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
393 			 err);
394 		goto err_register_netdev;
395 	}
396 	return;
397 
398 err_register_netdev:
399 	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
400 err_register_swdev_blk:
401 	unregister_switchdev_notifier(&br_offloads->nb);
402 err_register_swdev:
403 	destroy_workqueue(br_offloads->wq);
404 err_alloc_wq:
405 	mlx5_esw_bridge_cleanup(esw);
406 }
407 
408 void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
409 {
410 	struct mlx5_esw_bridge_offloads *br_offloads;
411 	struct mlx5_core_dev *mdev = priv->mdev;
412 	struct mlx5_eswitch *esw =
413 		mdev->priv.eswitch;
414 
415 	br_offloads = esw->br_offloads;
416 	if (!br_offloads)
417 		return;
418 
419 	unregister_netdevice_notifier(&br_offloads->netdev_nb);
420 	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
421 	unregister_switchdev_notifier(&br_offloads->nb);
422 	cancel_delayed_work(&br_offloads->update_work);
423 	destroy_workqueue(br_offloads->wq);
424 	rtnl_lock();
425 	mlx5_esw_bridge_cleanup(esw);
426 	rtnl_unlock();
427 }
428