// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

struct mlx5_bridge_switchdev_fdb_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct mlx5_esw_bridge_offloads *br_offloads;
	bool add;
};

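/* Return true if the representor netdevice is backed by the given eswitch
 * instance.
 */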
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return esw == priv->mdev->priv.eswitch;
}

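/* Return true if the netdevice and the eswitch reside on the same physical
 * device, determined by comparing NIC system image GUIDs.
 */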
static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev, *esw_mdev;
	u64 system_guid, esw_system_guid;

	mdev = priv->mdev;
	esw_mdev = esw->dev;

	system_guid = mlx5_query_nic_system_image_guid(mdev);
	esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);

	return system_guid == esw_system_guid;
}

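/* Walk the lower devices of a LAG master and return the first mlx5 eswitch
 * representor that has shared-FDB LAG enabled and belongs to the given
 * eswitch, or NULL if there is none.
 */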
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower, iter) {
		struct mlx5_core_dev *mdev;
		struct mlx5e_priv *priv;

		if (!mlx5e_eswitch_rep(lower))
			continue;

		priv = netdev_priv(lower);
		mdev = priv->mdev;
		if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
			return lower;
	}

	return NULL;
}

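/* Resolve a netdevice (possibly a LAG master) to an mlx5 representor on the
 * same hardware and fill in its vport number and owning vhca id. Returns the
 * representor netdevice, or NULL if the device is not offloadable by this
 * eswitch.
 */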
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
					  u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *priv;

	if (netif_is_lag_master(dev))
		dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

	if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
		return NULL;

	priv = netdev_priv(dev);
	rpriv = priv->ppriv;
	*vport_num = rpriv->rep->vport;
	*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
	return dev;
}

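/* Same as mlx5_esw_bridge_rep_vport_num_vhca_id_get(), but also descends
 * recursively through lower devices (skipping bridge masters) to find the
 * underlying representor.
 */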
static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
						u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
		return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
								 esw_owner_vhca_id);

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		struct net_device *rep;

		if (netif_is_bridge_master(lower_dev))
			continue;

		rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
								      esw_owner_vhca_id);
		if (rep)
			return rep;
	}

	return NULL;
}

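/* A port is "local" when its representor belongs to the eswitch that
 * registered the notifier. For LAG devices the port is only considered local
 * on the shared-FDB LAG master.
 */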
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
				     struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
		return false;

	priv = netdev_priv(rep);
	mdev = priv->mdev;
	if (netif_is_lag_master(dev))
		return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
	return true;
}

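/* NETDEV_CHANGEUPPER handler: offload (un)linking of a representor to/from a
 * bridge master. Local vports are (un)linked directly; vports owned by the
 * peer eswitch on the same hardware are (un)linked as peers.
 */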
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    netdev_nb);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper = info->upper_dev, *rep;
	struct mlx5_eswitch *esw = br_offloads->esw;
	u16 vport_num, esw_owner_vhca_id;
	struct netlink_ext_ack *extack;
	int ifindex = upper->ifindex;
	int err = 0;

	if (!netif_is_bridge_master(upper))
		return 0;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (mlx5_esw_bridge_is_local(dev, rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
						   br_offloads, extack) :
			mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
						     br_offloads, extack);
	else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
							br_offloads, extack) :
			mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
							  br_offloads, extack);

	return err;
}

static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
						unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;

	case NETDEV_CHANGEUPPER:
		err = mlx5_esw_bridge_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

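/* SWITCHDEV_PORT_OBJ_ADD handler: currently only VLAN objects added to an
 * offloaded bridge port are supported.
 */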
static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	u16 vport_num, esw_owner_vhca_id;
	int err;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
						    vlan->flags, br_offloads, extack);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return err;
}

static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	u16 vport_num, esw_owner_vhca_id;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

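/* SWITCHDEV_PORT_ATTR_SET handler: validate bridge port flags and offload
 * ageing time and VLAN filtering changes.
 */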
static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
				  struct switchdev_notifier_port_attr_info *port_attr_info,
				  struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
	const struct switchdev_attr *attr = port_attr_info->attr;
	u16 vport_num, esw_owner_vhca_id;
	int err = 0;

	if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
							     &esw_owner_vhca_id))
		return 0;

	port_attr_info->handled = true;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
			NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
			err = -EINVAL;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
						      attr->u.ageing_time, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
							 attr->u.vlan_filtering, br_offloads);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb_blk);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}

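/* Release the netdevice reference and the MAC address copy taken in
 * mlx5_esw_bridge_init_switchdev_fdb_work().
 */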
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
	dev_put(fdb_work->dev);
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}

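/* Deferred FDB add/remove handler. Runs from the ordered bridge workqueue
 * under RTNL, which keeps the netdevice topology stable while the vport is
 * resolved.
 */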
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
	struct mlx5_bridge_switchdev_fdb_work *fdb_work =
		container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
	struct switchdev_notifier_fdb_info *fdb_info =
		&fdb_work->fdb_info;
	struct mlx5_esw_bridge_offloads *br_offloads =
		fdb_work->br_offloads;
	struct net_device *dev = fdb_work->dev;
	u16 vport_num, esw_owner_vhca_id;

	rtnl_lock();

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		goto out;

	if (fdb_work->add)
		mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);
	else
		mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);

out:
	rtnl_unlock();
	mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

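/* Allocate and initialize a deferred FDB work item. Called from atomic
 * notifier context, hence GFP_ATOMIC. Takes a reference on the netdevice and
 * deep-copies the MAC address; both are released by
 * mlx5_esw_bridge_cleanup_switchdev_fdb_work().
 */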
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
					struct switchdev_notifier_fdb_info *fdb_info,
					struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_bridge_switchdev_fdb_work *work;
	u8 *addr;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!addr) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}
	ether_addr_copy(addr, fdb_info->addr);
	work->fdb_info.addr = addr;

	dev_hold(dev);
	work->dev = dev;
	work->br_offloads = br_offloads;
	work->add = add;
	return work;
}

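/* Atomic switchdev notifier. Attribute changes are handled inline; FDB events
 * are validated against the bridge topology and then deferred to the ordered
 * bridge workqueue.
 */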
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlx5_bridge_switchdev_fdb_work *work;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct switchdev_notifier_info *info = ptr;
	u16 vport_num, esw_owner_vhca_id;
	struct net_device *upper, *rep;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

		return notifier_from_errno(err);
	}

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
		/* only handle the event on the native eswitch of the representor */
		if (!mlx5_esw_bridge_is_local(dev, rep, esw))
			break;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
						fdb_info);
		break;
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* only handle the event on peers */
		if (mlx5_esw_bridge_is_local(dev, rep, esw))
			break;
		fallthrough;
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);

		work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
							       event == SWITCHDEV_FDB_ADD_TO_DEVICE,
							       fdb_info,
							       br_offloads);
		if (IS_ERR(work)) {
			WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
				  PTR_ERR(work));
			return notifier_from_errno(PTR_ERR(work));
		}

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

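/* Periodic work that refreshes offloaded FDB entry state under RTNL and
 * reschedules itself every MLX5_ESW_BRIDGE_UPDATE_INTERVAL milliseconds.
 */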
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
								    struct mlx5_esw_bridge_offloads,
								    update_work.work);

	rtnl_lock();
	mlx5_esw_bridge_update(br_offloads);
	rtnl_unlock();

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

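/* Set up bridge offloads for the eswitch: allocate the ordered workqueue,
 * register the atomic and blocking switchdev notifiers and the netdevice
 * notifier, and kick off the periodic update work. On failure everything is
 * unwound and bridge offloads stay disabled.
 */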
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int err;

	rtnl_lock();
	br_offloads = mlx5_esw_bridge_init(esw);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
		return;
	}

	br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
	if (!br_offloads->wq) {
		esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->nb);
	if (err) {
		esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
		goto err_register_swdev;
	}

	br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
	if (err) {
		esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
		goto err_register_swdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
	err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	if (err) {
		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
			 err);
		goto err_register_netdev;
	}
	INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
	return;

err_register_netdev:
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
	unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}

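/* Tear down bridge offloads in reverse order of mlx5e_rep_bridge_init(). */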
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	br_offloads = esw->br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
	unregister_switchdev_notifier(&br_offloads->nb);
	destroy_workqueue(br_offloads->wq);
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}