xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

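/* Interval (in msec) at which the periodic bridge update work rearms itself. */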
#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

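/* Deferred-work context for a single switchdev FDB add/del notification. */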
struct mlx5_bridge_switchdev_fdb_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct mlx5_esw_bridge_offloads *br_offloads;
	bool add;
};

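/* Return true if the netdev is backed by the given eswitch instance. */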
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return esw == priv->mdev->priv.eswitch;
}

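/* Return true if the netdev and the eswitch report the same NIC system image
 * GUID, i.e. belong to the same physical device.
 */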
static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev, *esw_mdev;
	u64 system_guid, esw_system_guid;

	mdev = priv->mdev;
	esw_mdev = esw->dev;

	system_guid = mlx5_query_nic_system_image_guid(mdev);
	esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);

	return system_guid == esw_system_guid;
}

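/* Find the lower device of a LAG master that is a representor belonging to
 * the given eswitch.
 */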
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!mlx5e_eswitch_rep(lower))
			continue;

		if (mlx5_esw_bridge_dev_same_esw(lower, esw))
			return lower;
	}

	return NULL;
}

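/* Resolve a (possibly LAG) netdev to its representor and fill in the vport
 * number and owning eswitch vhca_id. Returns NULL if the device is not a
 * representor on the same hardware or bridge offloads are not initialized.
 */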
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
					  u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *priv;

	if (netif_is_lag_master(dev))
		dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

	if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
		return NULL;

	priv = netdev_priv(dev);

	if (!priv->mdev->priv.eswitch->br_offloads)
		return NULL;

	rpriv = priv->ppriv;
	*vport_num = rpriv->rep->vport;
	*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
	return dev;
}

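/* As above, but also recurse through the lower devices of @dev (skipping
 * bridge masters) to locate the underlying representor.
 */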
static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
						u16 *vport_num, u16 *esw_owner_vhca_id)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
		return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
								 esw_owner_vhca_id);

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		struct net_device *rep;

		if (netif_is_bridge_master(lower_dev))
			continue;

		rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
								      esw_owner_vhca_id);
		if (rep)
			return rep;
	}

	return NULL;
}

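/* A representor is local when it belongs to this eswitch; when the bridge
 * port is a LAG device, only the LAG master core device counts as local.
 */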
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
				     struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
		return false;

	priv = netdev_priv(rep);
	mdev = priv->mdev;
	if (netif_is_lag_master(dev))
		return mlx5_lag_is_master(mdev);
	return true;
}

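/* Handle NETDEV_CHANGEUPPER: link the vport to bridge offloads when its
 * representor is enslaved to a bridge, unlink it otherwise. Non-local vports
 * on the same hardware go through the peer link/unlink variants.
 */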
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    netdev_nb);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper = info->upper_dev, *rep;
	struct mlx5_eswitch *esw = br_offloads->esw;
	u16 vport_num, esw_owner_vhca_id;
	struct netlink_ext_ack *extack;
	int err = 0;

	if (!netif_is_bridge_master(upper))
		return 0;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (mlx5_esw_bridge_is_local(dev, rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
						   br_offloads, extack) :
			mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
						     br_offloads, extack);
	else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
							br_offloads, extack) :
			mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
							  br_offloads, extack);

	return err;
}

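/* Validate NETDEV_PRECHANGEUPPER for a LAG device being enslaved to a bridge:
 * every representor lower must be part of an active LAG in shared FDB mode.
 */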
static int
mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
		return 0;

	netdev_for_each_lower_dev(dev, lower, iter) {
		struct mlx5_core_dev *mdev;
		struct mlx5e_priv *priv;

		if (!mlx5e_eswitch_rep(lower))
			continue;

		priv = netdev_priv(lower);
		mdev = priv->mdev;
		if (!mlx5_lag_is_active(mdev))
			return -EAGAIN;
		if (!mlx5_lag_is_shared_fdb(mdev))
			return -EOPNOTSUPP;
	}

	return 0;
}

static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
						unsigned long event, void *ptr)
{
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
		break;

	case NETDEV_CHANGEUPPER:
		err = mlx5_esw_bridge_port_changeupper(nb, ptr);
		break;
	}

	return notifier_from_errno(err);
}

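/* Offload switchdev port object additions (VLANs and MDB entries) to the
 * eswitch bridge.
 */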
static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	const struct switchdev_obj_port_mdb *mdb;
	u16 vport_num, esw_owner_vhca_id;
	int err;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
						    vlan->flags, br_offloads, extack);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
		err = mlx5_esw_bridge_port_mdb_add(dev, vport_num, esw_owner_vhca_id, mdb->addr,
						   mdb->vid, br_offloads, extack);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return err;
}

static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
			     struct switchdev_notifier_port_obj_info *port_obj_info,
			     struct mlx5_esw_bridge_offloads *br_offloads)
{
	const struct switchdev_obj *obj = port_obj_info->obj;
	const struct switchdev_obj_port_vlan *vlan;
	const struct switchdev_obj_port_mdb *mdb;
	u16 vport_num, esw_owner_vhca_id;

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		return 0;

	port_obj_info->handled = true;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
		mlx5_esw_bridge_port_mdb_del(dev, vport_num, esw_owner_vhca_id, mdb->addr, mdb->vid,
					     br_offloads);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

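/* Apply switchdev port attributes (bridge port flags, ageing time, VLAN
 * filtering/protocol, multicast state) to the offloaded bridge port.
 */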
static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
				  struct switchdev_notifier_port_attr_info *port_attr_info,
				  struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
	const struct switchdev_attr *attr = port_attr_info->attr;
	u16 vport_num, esw_owner_vhca_id;
	int err = 0;

	if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
							     &esw_owner_vhca_id))
		return 0;

	port_attr_info->handled = true;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
			NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
			err = -EINVAL;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
						      attr->u.ageing_time, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
							 attr->u.vlan_filtering, br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlx5_esw_bridge_vlan_proto_set(vport_num,
						     esw_owner_vhca_id,
						     attr->u.vlan_protocol,
						     br_offloads);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlx5_esw_bridge_mcast_set(vport_num, esw_owner_vhca_id,
						!attr->u.mc_disabled, br_offloads);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb_blk);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
		break;
	case SWITCHDEV_PORT_ATTR_SET:
		err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
		break;
	default:
		err = 0;
	}

	return notifier_from_errno(err);
}

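/* Release the netdev reference and free the FDB work item allocated in
 * mlx5_esw_bridge_init_switchdev_fdb_work().
 */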
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
	dev_put(fdb_work->dev);
	kfree(fdb_work->fdb_info.addr);
	kfree(fdb_work);
}

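/* Workqueue handler that creates or removes an offloaded FDB entry under
 * rtnl lock, then frees the work item.
 */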
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
	struct mlx5_bridge_switchdev_fdb_work *fdb_work =
		container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
	struct switchdev_notifier_fdb_info *fdb_info =
		&fdb_work->fdb_info;
	struct mlx5_esw_bridge_offloads *br_offloads =
		fdb_work->br_offloads;
	struct net_device *dev = fdb_work->dev;
	u16 vport_num, esw_owner_vhca_id;

	rtnl_lock();

	if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
						       &esw_owner_vhca_id))
		goto out;

	if (fdb_work->add)
		mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);
	else
		mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
					   fdb_info);

out:
	rtnl_unlock();
	mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

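/* Allocate and initialize a work item for a deferred FDB add/del. The FDB
 * info and MAC address are copied and a netdev reference is taken, since the
 * notifier payload is not guaranteed to outlive the atomic callback.
 */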
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
					struct switchdev_notifier_fdb_info *fdb_info,
					struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_bridge_switchdev_fdb_work *work;
	u8 *addr;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
	memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

	addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!addr) {
		kfree(work);
		return ERR_PTR(-ENOMEM);
	}
	ether_addr_copy(addr, fdb_info->addr);
	work->fdb_info.addr = addr;

	dev_hold(dev);
	work->dev = dev;
	work->br_offloads = br_offloads;
	work->add = add;
	return work;
}

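/* Atomic switchdev notifier: handles attribute sets directly, updates FDB
 * entry usage on SWITCHDEV_FDB_ADD_TO_BRIDGE, and defers FDB add/del to the
 * ordered workqueue.
 */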
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
								    struct mlx5_esw_bridge_offloads,
								    nb);
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlx5_bridge_switchdev_fdb_work *work;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct switchdev_notifier_info *info = ptr;
	u16 vport_num, esw_owner_vhca_id;
	struct net_device *upper, *rep;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

		return notifier_from_errno(err);
	}

	upper = netdev_master_upper_dev_get_rcu(dev);
	if (!upper)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(upper))
		return NOTIFY_DONE;

	rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
	if (!rep)
		return NOTIFY_DONE;

	if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
						fdb_info);
		break;
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* only handle the event on peers */
		if (mlx5_esw_bridge_is_local(dev, rep, esw))
			break;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		/* Mark for deletion to prevent the update wq task from
		 * spuriously refreshing the entry which would mark it again as
		 * offloaded in SW bridge. After this fallthrough to regular
		 * async delete code.
		 */
		mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
						 fdb_info);
		fallthrough;
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);

		work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
							       event == SWITCHDEV_FDB_ADD_TO_DEVICE,
							       fdb_info,
							       br_offloads);
		if (IS_ERR(work)) {
			WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
				  PTR_ERR(work));
			return notifier_from_errno(PTR_ERR(work));
		}

		queue_work(br_offloads->wq, &work->work);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

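/* Periodic work that runs mlx5_esw_bridge_update() under rtnl lock and
 * reschedules itself every MLX5_ESW_BRIDGE_UPDATE_INTERVAL msec.
 */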
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
	struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
								    struct mlx5_esw_bridge_offloads,
								    update_work.work);

	rtnl_lock();
	mlx5_esw_bridge_update(br_offloads);
	rtnl_unlock();

	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

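/* Initialize bridge offloads for the eswitch: allocate the ordered workqueue,
 * register the switchdev, blocking switchdev and netdevice notifiers, and
 * kick off the periodic update work. Failures are logged and unwound.
 */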
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw =
		mdev->priv.eswitch;
	int err;

	rtnl_lock();
	br_offloads = mlx5_esw_bridge_init(esw);
	rtnl_unlock();
	if (IS_ERR(br_offloads)) {
		esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
		return;
	}

	br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
	if (!br_offloads->wq) {
		esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
		goto err_alloc_wq;
	}

	br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
	err = register_switchdev_notifier(&br_offloads->nb);
	if (err) {
		esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
		goto err_register_swdev;
	}

	br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
	err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
	if (err) {
		esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
		goto err_register_swdev_blk;
	}

	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
	err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	if (err) {
		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
			 err);
		goto err_register_netdev;
	}
	INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
	queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
			   msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
	return;

err_register_netdev:
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
	unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
	destroy_workqueue(br_offloads->wq);
err_alloc_wq:
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}

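/* Tear down bridge offloads in reverse order of mlx5e_rep_bridge_init(). */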
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw =
		mdev->priv.eswitch;

	br_offloads = esw->br_offloads;
	if (!br_offloads)
		return;

	cancel_delayed_work_sync(&br_offloads->update_work);
	unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
	unregister_switchdev_notifier(&br_offloads->nb);
	destroy_workqueue(br_offloads->wq);
	rtnl_lock();
	mlx5_esw_bridge_cleanup(esw);
	rtnl_unlock();
}
598