xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision 4d016ae42efb214d4b441b0654771ddf34c72891)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
/* Driver-wide bridge offload state; one instance per mlxsw_sp device. */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		/* Delayed work that polls the device for FDB notifications;
		 * scheduled while at least one bridge is offloaded.
		 */
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Set while a VLAN-aware bridge is offloaded; only one is allowed. */
	bool vlan_enabled_exists;
	/* List of offloaded bridges (struct mlxsw_sp_bridge_device). */
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	/* Ops for the supported bridge flavours: VLAN-aware 802.1Q,
	 * VLAN-unaware 802.1D and 802.1AD.
	 */
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
46 
/* State tracked for one offloaded bridge netdevice. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	/* Member of mlxsw_sp_bridge::bridges_list. */
	struct list_head list;
	/* List of enslaved ports (struct mlxsw_sp_bridge_port). */
	struct list_head ports_list;
	struct list_head mdb_list;
	/* MDB entries hashed by {MAC, FID}; see mlxsw_sp_mdb_ht_params. */
	struct rhashtable mdb_ht;
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	/* Flavour ops, chosen at creation based on VLAN awareness/protocol. */
	const struct mlxsw_sp_bridge_ops *ops;
};
58 
/* State tracked for one bridge port (front-panel port, LAG or VLAN upper). */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	/* Member of mlxsw_sp_bridge_device::ports_list. */
	struct list_head list;
	/* List of VLANs configured on the port (struct mlxsw_sp_bridge_vlan). */
	struct list_head vlans_list;
	unsigned int ref_count;
	u8 stp_state;
	/* BR_* bridge port flags (BR_LEARNING, BR_FLOOD, ...). */
	unsigned long flags;
	bool mrouter;
	bool lagged;
	/* lag_id is valid when 'lagged', system_port otherwise. */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
74 
/* One VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	/* Member of mlxsw_sp_bridge_port::vlans_list. */
	struct list_head list;
	/* Port-VLANs using this VLAN, linked via their 'bridge_vlan_node'. */
	struct list_head port_vlan_list;
	u16 vid;
};
80 
/* Per-bridge-flavour (802.1Q/802.1D/802.1AD) operations. */
struct mlxsw_sp_bridge_ops {
	/* Bind/unbind a local port to/from the bridge. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Attach a VXLAN device to the bridge for VLAN 'vid'. */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	/* Resolve the FID for 'vid'; fid_get takes an extack and presumably
	 * may allocate, fid_lookup does not — confirm in the flavour
	 * implementations.
	 */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	/* Map a FID back to the VLAN it represents on this bridge. */
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
101 
/* ASIC-generation specific switchdev hooks. */
struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};
105 
/* Hash key for MDB entries: multicast MAC address + FID. Layout is hashed
 * as raw bytes (see mlxsw_sp_mdb_ht_params.key_len), so do not reorder.
 */
struct mlxsw_sp_mdb_entry_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};
110 
/* One multicast group entry; presumably a member of
 * mlxsw_sp_bridge_device::mdb_list — confirm at the insertion site.
 */
struct mlxsw_sp_mdb_entry {
	struct list_head list;
	/* Node in mlxsw_sp_bridge_device::mdb_ht. */
	struct rhash_head ht_node;
	struct mlxsw_sp_mdb_entry_key key;
	u16 mid;
	/* Member ports (struct mlxsw_sp_mdb_entry_port) and their count. */
	struct list_head ports_list;
	u16 ports_count;
};
119 
/* Reference-counted membership of a local port in an MDB entry. */
struct mlxsw_sp_mdb_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
	refcount_t refcount;
	bool mrouter;
};
126 
/* MDB hash table: entries keyed by the raw bytes of
 * struct mlxsw_sp_mdb_entry_key ({MAC, FID}).
 */
static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
};
132 
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device
				   *bridge_device, bool mc_enabled);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
152 
153 static struct mlxsw_sp_bridge_device *
154 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
155 			    const struct net_device *br_dev)
156 {
157 	struct mlxsw_sp_bridge_device *bridge_device;
158 
159 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
160 		if (bridge_device->dev == br_dev)
161 			return bridge_device;
162 
163 	return NULL;
164 }
165 
166 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
167 					 const struct net_device *br_dev)
168 {
169 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
170 }
171 
/* netdev_walk_all_upper_dev_rcu() callback: destroy the router interface
 * (RIF) associated with 'dev', if one exists. Always returns 0 so the walk
 * continues over all uppers.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    struct netdev_nested_priv *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv->data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
180 
/* Destroy the RIF of 'dev' itself and of every netdevice stacked on top of
 * it (e.g. VLAN uppers of the bridge).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
193 
/* Offload every running VXLAN device already enslaved to 'br_dev'. On
 * failure, a second walk detaches the devices joined so far; iteration
 * order is assumed stable between the two walks, and 'stop_dev' marks the
 * device whose join failed (it was not joined, so it is not left).
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
226 
227 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
228 					      struct net_device *br_dev)
229 {
230 	struct net_device *dev;
231 	struct list_head *iter;
232 
233 	netdev_for_each_lower_dev(br_dev, dev, iter) {
234 		if (netif_is_vxlan(dev) && netif_running(dev))
235 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
236 	}
237 }
238 
239 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
240 					      bool no_delay)
241 {
242 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
243 	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
244 
245 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
246 			       msecs_to_jiffies(interval));
247 }
248 
/* Create the tracking structure for a newly offloaded bridge netdevice.
 * Selects the flavour ops (802.1Q/802.1AD/802.1D), starts FDB polling for
 * the first bridge and replays already-enslaved VXLAN devices. Returns the
 * new bridge_device or an ERR_PTR.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	/* The device supports offloading at most one VLAN-aware bridge. */
	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_mdb_rhashtable_init;

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		/* The VLAN protocol picks between 802.1AD and 802.1Q ops. */
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mdb_list);

	/* Start FDB polling when the first bridge is offloaded. */
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	/* NOTE(review): the FDB polling work started above is not cancelled
	 * on this path even if this was the only bridge — confirm that is
	 * benign (the work is cancelled on the next bridge destroy).
	 */
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	rhashtable_destroy(&bridge_device->mdb_ht);
err_mdb_rhashtable_init:
	kfree(bridge_device);
	return ERR_PTR(err);
}
315 
/* Tear down an offloaded bridge: detach VXLAN devices, destroy RIFs of the
 * bridge and its uppers, stop FDB polling when this was the last bridge,
 * and free the tracking structure. Ports and MDB entries must already be
 * gone (WARN otherwise).
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mdb_list));
	rhashtable_destroy(&bridge_device->mdb_ht);
	kfree(bridge_device);
}
333 
/* Return the tracking structure for 'br_dev', creating it on first use.
 * Returns an ERR_PTR on creation failure.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev,
			   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev,
							      extack);

	return bridge_device;
}
347 
348 static void
349 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
350 			   struct mlxsw_sp_bridge_device *bridge_device)
351 {
352 	if (list_empty(&bridge_device->ports_list))
353 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
354 }
355 
356 static struct mlxsw_sp_bridge_port *
357 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
358 			    const struct net_device *brport_dev)
359 {
360 	struct mlxsw_sp_bridge_port *bridge_port;
361 
362 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
363 		if (bridge_port->dev == brport_dev)
364 			return bridge_port;
365 	}
366 
367 	return NULL;
368 }
369 
370 struct mlxsw_sp_bridge_port *
371 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
372 			  struct net_device *brport_dev)
373 {
374 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
375 	struct mlxsw_sp_bridge_device *bridge_device;
376 
377 	if (!br_dev)
378 		return NULL;
379 
380 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
381 	if (!bridge_device)
382 		return NULL;
383 
384 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
385 }
386 
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack);
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj);

/* Context for the switchdev object replay/unreplay notifiers below.
 * 'done' counts objects successfully replayed, so unreplay can undo exactly
 * that many and then stop.
 */
struct mlxsw_sp_bridge_port_replay_switchdev_objs {
	struct net_device *brport_dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int done;
};
398 
/* Notifier callback used while replaying switchdev objects for a port that
 * joined a bridge (possibly through a LAG): applies each PORT_OBJ_ADD aimed
 * at 'brport_dev' to the mlxsw port, counting successes in rso->done.
 */
static int
mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct netlink_ext_ack *extack = port_obj_info->info.extack;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
	int err = 0;

	rso = (void *)port_obj_info->info.ctx;

	/* Only object additions targeted at the replayed bridge port matter. */
	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		goto out;

	/* When a port is joining the bridge through a LAG, there likely are
	 * VLANs configured on that LAG already. The replay will thus attempt to
	 * have the given port-vlans join the corresponding FIDs. But the LAG
	 * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN
	 * memberships, back before CHANGEUPPER was distributed and netdevice
	 * master set. So now before propagating the VLAN events further, we
	 * first need to kill the corresponding VID at the mlxsw_sp_port.
	 *
	 * Note that this doesn't need to be rolled back on failure -- if the
	 * replay fails, the enslavement is off, and the VIDs would be killed by
	 * LAG anyway as part of its rollback.
	 */
	if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) {
		u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid;

		err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid);
		if (err)
			goto out;
	}

	/* Count before attempting, so unreplay undoes this object too. */
	++rso->done;
	err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL,
				    port_obj_info->obj, extack);

out:
	return notifier_from_errno(err);
}
442 
/* Notifier block driving the object replay above. */
static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs,
};
446 
/* Rollback counterpart of the replay callback: deletes the first rso->done
 * objects that were previously added, then stops the walk (NOTIFY_STOP once
 * the counter is exhausted).
 */
static int
mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;

	rso = (void *)port_obj_info->info.ctx;

	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		return NOTIFY_DONE;
	/* Post-decrement: stops only after 'done' objects were deleted. */
	if (!rso->done--)
		return NOTIFY_STOP;

	mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL,
			      port_obj_info->obj);
	return NOTIFY_DONE;
}
467 
/* Notifier block driving the object unreplay above. */
static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs,
};
471 
/* Allocate and initialize the tracking structure for a bridge port, link it
 * into the bridge device's port list and mark the port as offloaded towards
 * the bridge layer. Returns the new port (ref_count == 1) or an ERR_PTR.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): the lower-dev lookup result is dereferenced without a
	 * NULL check — presumably callers guarantee 'brport_dev' has an mlxsw
	 * lower; confirm at the call sites.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	/* Mirror the current software bridge state and default port flags. */
	bridge_port->stp_state = br_port_get_stp_state(brport_dev);
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}
512 
/* Unwind mlxsw_sp_bridge_port_create(): clear the offload mark, unlink and
 * free. All bridge VLANs must already be gone (WARN otherwise).
 */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
521 
/* Get a referenced bridge port for 'brport_dev', creating both the bridge
 * device and the port tracking structures on first use. The bridge device
 * reference taken here is dropped again if port creation fails.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
555 
556 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
557 				     struct mlxsw_sp_bridge_port *bridge_port)
558 {
559 	struct mlxsw_sp_bridge_device *bridge_device;
560 
561 	if (--bridge_port->ref_count != 0)
562 		return;
563 	bridge_device = bridge_port->bridge_device;
564 	mlxsw_sp_bridge_port_destroy(bridge_port);
565 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
566 }
567 
568 static struct mlxsw_sp_port_vlan *
569 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
570 				  const struct mlxsw_sp_bridge_device *
571 				  bridge_device,
572 				  u16 vid)
573 {
574 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
575 
576 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
577 			    list) {
578 		if (!mlxsw_sp_port_vlan->bridge_port)
579 			continue;
580 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
581 		    bridge_device)
582 			continue;
583 		if (bridge_device->vlan_enabled &&
584 		    mlxsw_sp_port_vlan->vid != vid)
585 			continue;
586 		return mlxsw_sp_port_vlan;
587 	}
588 
589 	return NULL;
590 }
591 
592 static struct mlxsw_sp_port_vlan*
593 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
594 			       u16 fid_index)
595 {
596 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
597 
598 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
599 			    list) {
600 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
601 
602 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
603 			return mlxsw_sp_port_vlan;
604 	}
605 
606 	return NULL;
607 }
608 
609 static struct mlxsw_sp_bridge_vlan *
610 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
611 			  u16 vid)
612 {
613 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
614 
615 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
616 		if (bridge_vlan->vid == vid)
617 			return bridge_vlan;
618 	}
619 
620 	return NULL;
621 }
622 
623 static struct mlxsw_sp_bridge_vlan *
624 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
625 {
626 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
627 
628 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
629 	if (!bridge_vlan)
630 		return NULL;
631 
632 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
633 	bridge_vlan->vid = vid;
634 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
635 
636 	return bridge_vlan;
637 }
638 
639 static void
640 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
641 {
642 	list_del(&bridge_vlan->list);
643 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
644 	kfree(bridge_vlan);
645 }
646 
647 static struct mlxsw_sp_bridge_vlan *
648 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
649 {
650 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
651 
652 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
653 	if (bridge_vlan)
654 		return bridge_vlan;
655 
656 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
657 }
658 
659 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
660 {
661 	if (list_empty(&bridge_vlan->port_vlan_list))
662 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
663 }
664 
665 static int
666 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
667 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
668 				  u8 state)
669 {
670 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
671 
672 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
673 			    bridge_vlan_node) {
674 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
675 			continue;
676 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
677 						 bridge_vlan->vid, state);
678 	}
679 
680 	return 0;
681 }
682 
/* Switchdev STP-state attribute handler: program 'state' on every VLAN of
 * the bridge port. On failure, already-programmed VLANs are rolled back to
 * the previous cached state.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Cache only after all VLANs were programmed successfully. */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
717 
/* Set flood membership of 'packet_type' for this port in the FID backing
 * the given bridge VLAN. A port not using the VLAN is a no-op success.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}
738 
/* Apply flood membership of 'packet_type' for this port across all VLANs of
 * the bridge port, rolling back to the opposite setting on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
766 
767 static int
768 mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
769 				enum mlxsw_sp_flood_type packet_type,
770 				bool member)
771 {
772 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
773 	int err;
774 
775 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
776 			    bridge_vlan_node) {
777 		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
778 
779 		err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
780 					     packet_type, local_port, member);
781 		if (err)
782 			goto err_fid_flood_set;
783 	}
784 
785 	return 0;
786 
787 err_fid_flood_set:
788 	list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
789 					     &bridge_vlan->port_vlan_list,
790 					     list) {
791 		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
792 
793 		mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
794 				       local_port, !member);
795 	}
796 
797 	return err;
798 }
799 
/* Apply flood membership of 'packet_type' across all VLANs of the bridge
 * port and all their member ports, rolling back on failure.
 */
static int
mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
				      enum mlxsw_sp_flood_type packet_type,
				      bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						      member);
		if (err)
			goto err_bridge_vlans_flood_set;
	}

	return 0;

err_bridge_vlans_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						!member);
	return err;
}
824 
825 static int
826 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
827 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
828 				       bool set)
829 {
830 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
831 	u16 vid = bridge_vlan->vid;
832 
833 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
834 			    bridge_vlan_node) {
835 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
836 			continue;
837 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
838 	}
839 
840 	return 0;
841 }
842 
/* Enable/disable learning for this port on every VLAN of the bridge port,
 * rolling back to the opposite setting on failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
867 
/* Pre-commit check for bridge port flags: reject flags the device cannot
 * offload, and reject BR_PORT_LOCKED in combination with VLAN uppers (both
 * directions), since locked-port enforcement cannot be scoped that way.
 */
static int
mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    const struct net_device *orig_dev,
				    struct switchdev_brport_flags flags,
				    struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_PORT_LOCKED | BR_PORT_MAB)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported bridge port flag");
		return -EINVAL;
	}

	if ((flags.mask & BR_PORT_LOCKED) && is_vlan_dev(orig_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a VLAN upper");
		return -EINVAL;
	}

	if ((flags.mask & BR_PORT_LOCKED) && vlan_uses_dev(orig_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a bridge port that has VLAN uppers");
		return -EINVAL;
	}

	return 0;
}
892 
/* Bridge port flags attribute handler: program unicast flood, learning,
 * locked-port security and (when multicast snooping is disabled) multicast
 * flood, then cache the new flag values on the bridge port.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *orig_dev,
					   struct switchdev_brport_flags flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* Deferred attribute: the port may have failed to enslave. */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (flags.mask & BR_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_UC,
							   flags.val & BR_FLOOD);
		if (err)
			return err;
	}

	if (flags.mask & BR_LEARNING) {
		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
							bridge_port,
							flags.val & BR_LEARNING);
		if (err)
			return err;
	}

	if (flags.mask & BR_PORT_LOCKED) {
		err = mlxsw_sp_port_security_set(mlxsw_sp_port,
						 flags.val & BR_PORT_LOCKED);
		if (err)
			return err;
	}

	/* With multicast snooping enabled, BR_MCAST_FLOOD does not drive the
	 * flood table (mrouter state does — see mlxsw_sp_mc_flood()), so only
	 * the cached flags are updated.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	if (flags.mask & BR_MCAST_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_MC,
							   flags.val & BR_MCAST_FLOOD);
		if (err)
			return err;
	}

out:
	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
	return 0;
}
945 
/* Program the FDB ageing time (seconds) via the SFDAT register and cache it
 * on success.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}
958 
959 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
960 					    unsigned long ageing_clock_t)
961 {
962 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
963 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
964 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
965 
966 	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
967 	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
968 		return -ERANGE;
969 
970 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
971 }
972 
973 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
974 					  struct net_device *orig_dev,
975 					  bool vlan_enabled)
976 {
977 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
978 	struct mlxsw_sp_bridge_device *bridge_device;
979 
980 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
981 	if (WARN_ON(!bridge_device))
982 		return -EINVAL;
983 
984 	if (bridge_device->vlan_enabled == vlan_enabled)
985 		return 0;
986 
987 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
988 	return -EINVAL;
989 }
990 
991 static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
992 						struct net_device *orig_dev,
993 						u16 vlan_proto)
994 {
995 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
996 	struct mlxsw_sp_bridge_device *bridge_device;
997 
998 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
999 	if (WARN_ON(!bridge_device))
1000 		return -EINVAL;
1001 
1002 	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
1003 	return -EINVAL;
1004 }
1005 
1006 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1007 					  struct net_device *orig_dev,
1008 					  bool is_port_mrouter)
1009 {
1010 	struct mlxsw_sp_bridge_port *bridge_port;
1011 	int err;
1012 
1013 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
1014 						orig_dev);
1015 	if (!bridge_port)
1016 		return 0;
1017 
1018 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
1019 					 is_port_mrouter);
1020 
1021 	if (!bridge_port->bridge_device->multicast_enabled)
1022 		goto out;
1023 
1024 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
1025 						   MLXSW_SP_FLOOD_TYPE_MC,
1026 						   is_port_mrouter);
1027 	if (err)
1028 		return err;
1029 
1030 out:
1031 	bridge_port->mrouter = is_port_mrouter;
1032 	return 0;
1033 }
1034 
1035 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
1036 {
1037 	const struct mlxsw_sp_bridge_device *bridge_device;
1038 
1039 	bridge_device = bridge_port->bridge_device;
1040 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
1041 					bridge_port->flags & BR_MCAST_FLOOD;
1042 }
1043 
/* Handle toggling of multicast snooping on the bridge: sync MDB entries and
 * re-evaluate MC flood table membership for every bridge port, rolling both
 * back on failure.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled == !mc_disabled)
		return 0;

	/* Flip the state before the loop: mlxsw_sp_mc_flood() reads it to
	 * decide each port's membership under the new setting.
	 */
	bridge_device->multicast_enabled = !mc_disabled;
	err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
						 !mc_disabled);
	if (err)
		goto err_mc_enable_sync;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
							    packet_type,
							    member);
		if (err)
			goto err_flood_table_set;
	}

	return 0;

err_flood_table_set:
	/* multicast_enabled still holds the new value here, so
	 * mlxsw_sp_mc_flood() recomputes the same 'member' as the forward
	 * pass and !member undoes the membership just programmed.
	 */
	list_for_each_entry_continue_reverse(bridge_port,
					     &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
						      !member);
	}
	mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
					   mc_disabled);
err_mc_enable_sync:
	bridge_device->multicast_enabled = mc_disabled;
	return err;
}
1096 
1097 static struct mlxsw_sp_mdb_entry_port *
1098 mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
1099 			       u16 local_port)
1100 {
1101 	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1102 
1103 	list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
1104 		if (mdb_entry_port->local_port == local_port)
1105 			return mdb_entry_port;
1106 	}
1107 
1108 	return NULL;
1109 }
1110 
/* Take a regular (non-mrouter) reference on the MDB entry's member record
 * for @local_port, creating the record and programming the port into the
 * PGT entry on first use. Returns the record or ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* A member whose only reference so far is the mrouter one
		 * (see mlxsw_sp_mdb_entry_mrouter_port_get()) was not
		 * accounted in ports_count; count it now that a regular
		 * user appears.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count++;

		refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* First reference: program the hardware before allocating the
	 * tracking structure.
	 */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
	mdb_entry->ports_count++;

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1152 
/* Drop a regular reference on the MDB entry's member record for
 * @local_port. With @force, the member is removed regardless of its
 * reference count. The PGT entry is only updated when the member is
 * actually removed.
 */
static void
mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port, bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
		/* If only the mrouter reference remains, the port no longer
		 * counts as a regular member (mirrors the accounting in
		 * mlxsw_sp_mdb_entry_port_get()).
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count--;
		return;
	}

	mdb_entry->ports_count--;
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1177 
/* Take the mrouter reference on the MDB entry's member record for
 * @local_port. An existing member that is not yet an mrouter simply gains
 * one extra reference; an already-mrouter member is returned as-is. A
 * freshly created member is marked as mrouter and is deliberately NOT
 * added to ports_count — mrouter-only members are not accounted as regular
 * members (see mlxsw_sp_mdb_entry_port_get()).
 */
static __always_unused struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		if (!mdb_entry_port->mrouter)
			refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* First reference: program the hardware before allocating the
	 * tracking structure.
	 */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	mdb_entry_port->mrouter = true;
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1216 
/* Release the mrouter reference taken by
 * mlxsw_sp_mdb_entry_mrouter_port_get(). Members without the mrouter flag
 * are left untouched; the member and its PGT mapping are removed only when
 * the last reference goes away.
 */
static __always_unused void
mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!mdb_entry_port->mrouter)
		return;

	/* Clear the flag before dropping the reference so the remaining
	 * references (if any) are all regular ones.
	 */
	mdb_entry_port->mrouter = false;
	if (!refcount_dec_and_test(&mdb_entry_port->refcount))
		return;

	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1240 
1241 static void
1242 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
1243 				   struct mlxsw_sp_bridge_device *bridge_device,
1244 				   bool add)
1245 {
1246 	u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
1247 	struct mlxsw_sp_mdb_entry *mdb_entry;
1248 
1249 	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
1250 		if (add)
1251 			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
1252 							    local_port);
1253 		else
1254 			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
1255 							    local_port);
1256 	}
1257 }
1258 
1259 static int
1260 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1261 				  struct net_device *orig_dev,
1262 				  bool is_mrouter)
1263 {
1264 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1265 	struct mlxsw_sp_bridge_device *bridge_device;
1266 
1267 	/* It's possible we failed to enslave the port, yet this
1268 	 * operation is executed due to it being deferred.
1269 	 */
1270 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
1271 	if (!bridge_device)
1272 		return 0;
1273 
1274 	if (bridge_device->mrouter != is_mrouter)
1275 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
1276 						   is_mrouter);
1277 	bridge_device->mrouter = is_mrouter;
1278 	return 0;
1279 }
1280 
/* switchdev .port_attr_set handler: dispatch the attribute change to the
 * matching bridge helper. Unsupported attributes yield -EOPNOTSUPP.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->orig_dev,
							  attr->u.brport_flags,
							  extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Re-resolve SPAN (mirroring) state, which may be affected by the
	 * attribute change — done unconditionally, even on error.
	 */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1343 
/* Join the port-VLAN to its bridge's FID: look up/create the FID for @vid,
 * enable UC/MC/BC flooding for this local port and map {port, VID} to the
 * FID. On success the FID reference is stored in mlxsw_sp_port_vlan->fid
 * and released by mlxsw_sp_port_vlan_fid_leave().
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unknown-unicast flooding follows the port's BR_FLOOD flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flooding depends on snooping/mrouter state. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

	/* Unwind in reverse order of setup. */
err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1394 
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap {port, VID} from the FID,
 * disable all flood types for the port and drop the FID reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the back-pointer first; the local 'fid' keeps the reference
	 * alive until the final put below.
	 */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
1410 
1411 static u16
1412 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1413 			     u16 vid, bool is_pvid)
1414 {
1415 	if (is_pvid)
1416 		return vid;
1417 	else if (mlxsw_sp_port->pvid == vid)
1418 		return 0;	/* Dis-allow untagged packets */
1419 	else
1420 		return mlxsw_sp_port->pvid;
1421 }
1422 
/* Attach a port-VLAN to a bridge port: join the FID, sync learning and STP
 * state for the VID and link the port-VLAN into the bridge VLAN's list.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port while the port-VLAN is attached to it; the
	 * matching put is in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1475 
/* Detach a port-VLAN from its bridge port, undoing
 * mlxsw_sp_port_vlan_bridge_join(): unlink from the bridge VLAN, reset
 * STP/learning for the VID, flush FDB/MDB state and leave the FID.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port;

	/* Only VLAN-aware (802.1Q) and VLAN-unaware (802.1D) bridge FIDs
	 * are expected here.
	 */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	/* Record before unlinking whether this is the last port-VLAN using
	 * the bridge VLAN, so the FDB can be flushed below.
	 */
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));

	mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
				       mlxsw_sp_fid_index(fid));

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken in mlxsw_sp_port_vlan_bridge_join(). */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1511 
/* Add @vid to a bridge port: create the port-VLAN if needed, install the
 * VLAN (optionally egress-untagged), update the PVID and join the bridge.
 * All interim steps are rolled back on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	/* The PVID must be set with the EtherType configured on the bridge
	 * (802.1Q or 802.1ad).
	 */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1565 
1566 static int
1567 mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1568 			    struct net_device *br_dev,
1569 			    const struct switchdev_obj_port_vlan *vlan,
1570 			    struct netlink_ext_ack *extack)
1571 {
1572 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1573 
1574 	return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid,
1575 					       flag_pvid, extack);
1576 }
1577 
/* switchdev port VLAN add handler for bridged ports. */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* VLANs on the bridge device itself are only reflected to
		 * the router (bridge RIF PVID). A real error is propagated;
		 * success is converted to -EOPNOTSUPP — presumably so the
		 * object is not marked as offloaded (NOTE(review): confirm
		 * against switchdev core).
		 */
		if (br_vlan_enabled(orig_dev))
			err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev,
							  vlan, extack);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLANs on a VLAN-unaware bridge port need no per-VLAN handling. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
					     vlan->vid, flag_untagged,
					     flag_pvid, extack);
}
1610 
1611 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1612 {
1613 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1614 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1615 }
1616 
1617 static int
1618 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1619 			       struct mlxsw_sp_bridge_port *bridge_port,
1620 			       u16 fid_index)
1621 {
1622 	bool lagged = bridge_port->lagged;
1623 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1624 	u16 system_port;
1625 
1626 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1627 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1628 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1629 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1630 
1631 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1632 }
1633 
1634 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1635 {
1636 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1637 			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1638 }
1639 
1640 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1641 {
1642 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1643 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1644 }
1645 
1646 static int
1647 mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
1648 			     const char *mac, u16 fid, __be32 addr, bool adding)
1649 {
1650 	char *sfd_pl;
1651 	u8 num_rec;
1652 	u32 uip;
1653 	int err;
1654 
1655 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1656 	if (!sfd_pl)
1657 		return -ENOMEM;
1658 
1659 	uip = be32_to_cpu(addr);
1660 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1661 	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
1662 				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
1663 				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
1664 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1665 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1666 	if (err)
1667 		goto out;
1668 
1669 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1670 		err = -EBUSY;
1671 
1672 out:
1673 	kfree(sfd_pl);
1674 	return err;
1675 }
1676 
1677 static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
1678 						  const char *mac, u16 fid,
1679 						  u32 kvdl_index, bool adding)
1680 {
1681 	char *sfd_pl;
1682 	u8 num_rec;
1683 	int err;
1684 
1685 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1686 	if (!sfd_pl)
1687 		return -ENOMEM;
1688 
1689 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1690 	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
1691 				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
1692 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1693 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1694 	if (err)
1695 		goto out;
1696 
1697 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1698 		err = -EBUSY;
1699 
1700 out:
1701 	kfree(sfd_pl);
1702 	return err;
1703 }
1704 
/* Add a unicast tunnel FDB record resolving to an IPv6 underlay address:
 * the address is first inserted into KVDL, the FDB record then refers to
 * it by KVDL index, and the {MAC, FID} -> address mapping is recorded for
 * later cleanup by mlxsw_sp_port_fdb_tun_uc_op6_del().
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
					    const char *mac, u16 fid,
					    const struct in6_addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
	if (err)
		return err;

	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
						     kvdl_index, true);
	if (err)
		goto err_sfd_write;

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
	if (err)
		/* Replace can fail only for creating new mapping, so removing
		 * the FDB entry in the error path is OK.
		 */
		goto err_addr_replace;

	return 0;

err_addr_replace:
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
					       false);
err_sfd_write:
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
	return err;
}
1737 
/* Tear down in reverse order of mlxsw_sp_port_fdb_tun_uc_op6_add(): drop
 * the {MAC, FID} -> address mapping, remove the FDB record and release the
 * KVDL entry holding the IPv6 address.
 */
static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
					     const char *mac, u16 fid,
					     const struct in6_addr *addr)
{
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
}
1746 
1747 static int
1748 mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1749 			     u16 fid, const struct in6_addr *addr, bool adding)
1750 {
1751 	if (adding)
1752 		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1753 							addr);
1754 
1755 	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1756 	return 0;
1757 }
1758 
1759 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1760 					  const char *mac, u16 fid,
1761 					  enum mlxsw_sp_l3proto proto,
1762 					  const union mlxsw_sp_l3addr *addr,
1763 					  bool adding, bool dynamic)
1764 {
1765 	switch (proto) {
1766 	case MLXSW_SP_L3_PROTO_IPV4:
1767 		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1768 						    addr->addr4, adding);
1769 	case MLXSW_SP_L3_PROTO_IPV6:
1770 		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1771 						    &addr->addr6, adding);
1772 	default:
1773 		WARN_ON(1);
1774 		return -EOPNOTSUPP;
1775 	}
1776 }
1777 
1778 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1779 				     const char *mac, u16 fid, u16 vid,
1780 				     bool adding,
1781 				     enum mlxsw_reg_sfd_rec_action action,
1782 				     enum mlxsw_reg_sfd_rec_policy policy)
1783 {
1784 	char *sfd_pl;
1785 	u8 num_rec;
1786 	int err;
1787 
1788 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1789 	if (!sfd_pl)
1790 		return -ENOMEM;
1791 
1792 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1793 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
1794 			      local_port);
1795 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1796 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1797 	if (err)
1798 		goto out;
1799 
1800 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1801 		err = -EBUSY;
1802 
1803 out:
1804 	kfree(sfd_pl);
1805 	return err;
1806 }
1807 
1808 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1809 				   const char *mac, u16 fid, u16 vid,
1810 				   bool adding, bool dynamic)
1811 {
1812 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
1813 					 adding, MLXSW_REG_SFD_REC_ACTION_NOP,
1814 					 mlxsw_sp_sfd_rec_policy(dynamic));
1815 }
1816 
/* Add or remove a static FDB record that forwards a RIF's MAC to the
 * router, on local port 0 with no VID.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
1824 
1825 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1826 				       const char *mac, u16 fid, u16 lag_vid,
1827 				       bool adding, bool dynamic)
1828 {
1829 	char *sfd_pl;
1830 	u8 num_rec;
1831 	int err;
1832 
1833 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1834 	if (!sfd_pl)
1835 		return -ENOMEM;
1836 
1837 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1838 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1839 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1840 				  lag_vid, lag_id);
1841 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1842 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1843 	if (err)
1844 		goto out;
1845 
1846 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1847 		err = -EBUSY;
1848 
1849 out:
1850 	kfree(sfd_pl);
1851 	return err;
1852 }
1853 
1854 static int
1855 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1856 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1857 {
1858 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1859 	struct net_device *orig_dev = fdb_info->info.dev;
1860 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1861 	struct mlxsw_sp_bridge_device *bridge_device;
1862 	struct mlxsw_sp_bridge_port *bridge_port;
1863 	u16 fid_index, vid;
1864 
1865 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1866 	if (!bridge_port)
1867 		return -EINVAL;
1868 
1869 	bridge_device = bridge_port->bridge_device;
1870 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1871 							       bridge_device,
1872 							       fdb_info->vid);
1873 	if (!mlxsw_sp_port_vlan)
1874 		return 0;
1875 
1876 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1877 	vid = mlxsw_sp_port_vlan->vid;
1878 
1879 	if (!bridge_port->lagged)
1880 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1881 					       bridge_port->system_port,
1882 					       fdb_info->addr, fid_index, vid,
1883 					       adding, false);
1884 	else
1885 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1886 						   bridge_port->lag_id,
1887 						   fdb_info->addr, fid_index,
1888 						   vid, adding, false);
1889 }
1890 
1891 static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
1892 				    const struct mlxsw_sp_mdb_entry *mdb_entry,
1893 				    bool adding)
1894 {
1895 	char *sfd_pl;
1896 	u8 num_rec;
1897 	int err;
1898 
1899 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1900 	if (!sfd_pl)
1901 		return -ENOMEM;
1902 
1903 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1904 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
1905 			      mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1906 			      mdb_entry->mid);
1907 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1908 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1909 	if (err)
1910 		goto out;
1911 
1912 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1913 		err = -EBUSY;
1914 
1915 out:
1916 	kfree(sfd_pl);
1917 	return err;
1918 }
1919 
1920 static void
1921 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1922 				      struct mlxsw_sp_bridge_port *bridge_port,
1923 				      struct mlxsw_sp_ports_bitmap *ports_bm)
1924 {
1925 	struct mlxsw_sp_port *mlxsw_sp_port;
1926 	u64 max_lag_members, i;
1927 	int lag_id;
1928 
1929 	if (!bridge_port->lagged) {
1930 		set_bit(bridge_port->system_port, ports_bm->bitmap);
1931 	} else {
1932 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1933 						     MAX_LAG_MEMBERS);
1934 		lag_id = bridge_port->lag_id;
1935 		for (i = 0; i < max_lag_members; i++) {
1936 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1937 								 lag_id, i);
1938 			if (mlxsw_sp_port)
1939 				set_bit(mlxsw_sp_port->local_port,
1940 					ports_bm->bitmap);
1941 		}
1942 	}
1943 }
1944 
1945 static void
1946 mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
1947 				struct mlxsw_sp_bridge_device *bridge_device,
1948 				struct mlxsw_sp *mlxsw_sp)
1949 {
1950 	struct mlxsw_sp_bridge_port *bridge_port;
1951 
1952 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1953 		if (bridge_port->mrouter) {
1954 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1955 							      bridge_port,
1956 							      flood_bm);
1957 		}
1958 	}
1959 }
1960 
/* Take an mrouter reference on @mdb_entry for every port set in @ports_bm,
 * rolling back the references taken so far on failure.
 */
static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ports_bitmap *ports_bm,
					struct mlxsw_sp_mdb_entry *mdb_entry)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	unsigned int nbits = ports_bm->nbits;
	int i;

	for_each_set_bit(i, ports_bm->bitmap, nbits) {
		mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
								     mdb_entry,
								     i);
		if (IS_ERR(mdb_entry_port)) {
			/* Shrink the iteration bound to the failing bit so
			 * the rollback loop below visits exactly the bits
			 * already processed.
			 */
			nbits = i;
			goto err_mrouter_port_get;
		}
	}

	return 0;

err_mrouter_port_get:
	for_each_set_bit(i, ports_bm->bitmap, nbits)
		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
	return PTR_ERR(mdb_entry_port);
}
1986 
1987 static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
1988 					 struct mlxsw_sp_ports_bitmap *ports_bm,
1989 					 struct mlxsw_sp_mdb_entry *mdb_entry)
1990 {
1991 	int i;
1992 
1993 	for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
1994 		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
1995 }
1996 
1997 static int
1998 mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
1999 			     struct mlxsw_sp_bridge_device *bridge_device,
2000 			     struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
2001 {
2002 	struct mlxsw_sp_ports_bitmap ports_bm;
2003 	int err;
2004 
2005 	err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
2006 	if (err)
2007 		return err;
2008 
2009 	mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
2010 
2011 	if (add)
2012 		err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
2013 						   mdb_entry);
2014 	else
2015 		mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
2016 
2017 	mlxsw_sp_port_bitmap_fini(&ports_bm);
2018 	return err;
2019 }
2020 
/* Allocate and set up a new MDB entry for {@addr, @fid}: reserve a PGT MID,
 * reference all current mrouter ports, take a reference for @local_port,
 * program the entry to the device (only while multicast is enabled on the
 * bridge) and link it into the bridge's hash table and list.
 * Returns the new entry or an ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   const unsigned char *addr, u16 fid, u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
	if (!mdb_entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(mdb_entry->key.addr, addr);
	mdb_entry->key.fid = fid;
	err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
	if (err)
		goto err_pgt_mid_alloc;

	INIT_LIST_HEAD(&mdb_entry->ports_list);

	err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
					   true);
	if (err)
		goto err_mdb_mrouters_set;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
						     local_port);
	if (IS_ERR(mdb_entry_port)) {
		err = PTR_ERR(mdb_entry_port);
		goto err_mdb_entry_port_get;
	}

	/* While multicast is disabled the entry is kept in software only;
	 * mlxsw_sp_bridge_mdb_mc_enable_sync() writes it once re-enabled.
	 */
	if (bridge_device->multicast_enabled) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
		if (err)
			goto err_mdb_entry_write;
	}

	err = rhashtable_insert_fast(&bridge_device->mdb_ht,
				     &mdb_entry->ht_node,
				     mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);

	return mdb_entry;

	/* Error unwinding mirrors the setup order above, newest first. */
err_rhashtable_insert:
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
err_mdb_entry_write:
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
err_mdb_entry_port_get:
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
err_mdb_mrouters_set:
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
err_pgt_mid_alloc:
	kfree(mdb_entry);
	return ERR_PTR(err);
}
2083 
/* Tear down @mdb_entry in reverse order of mlxsw_sp_mc_mdb_entry_init():
 * unlink it, remove it from the device, release the @local_port and mrouter
 * references, then free the PGT MID and the entry itself. The device entry
 * is removed before the port references are dropped so the MDB entry never
 * points at an already-emptied PGT entry (see mlxsw_sp_mc_mdb_entry_put()).
 */
static void
mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_mdb_entry *mdb_entry,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   u16 local_port, bool force)
{
	list_del(&mdb_entry->list);
	rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
			       mlxsw_sp_mdb_ht_params);
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
	/* All port references should be gone by now. */
	WARN_ON(!list_empty(&mdb_entry->ports_list));
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
	kfree(mdb_entry);
}
2101 
2102 static struct mlxsw_sp_mdb_entry *
2103 mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
2104 			  struct mlxsw_sp_bridge_device *bridge_device,
2105 			  const unsigned char *addr, u16 fid, u16 local_port)
2106 {
2107 	struct mlxsw_sp_mdb_entry_key key = {};
2108 	struct mlxsw_sp_mdb_entry *mdb_entry;
2109 
2110 	ether_addr_copy(key.addr, addr);
2111 	key.fid = fid;
2112 	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
2113 					   mlxsw_sp_mdb_ht_params);
2114 	if (mdb_entry) {
2115 		struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
2116 
2117 		mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
2118 							     mdb_entry,
2119 							     local_port);
2120 		if (IS_ERR(mdb_entry_port))
2121 			return ERR_CAST(mdb_entry_port);
2122 
2123 		return mdb_entry;
2124 	}
2125 
2126 	return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
2127 					  local_port);
2128 }
2129 
2130 static bool
2131 mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
2132 			     struct mlxsw_sp_mdb_entry_port *removed_entry_port,
2133 			     bool force)
2134 {
2135 	if (mdb_entry->ports_count > 1)
2136 		return false;
2137 
2138 	if (force)
2139 		return true;
2140 
2141 	if (!removed_entry_port->mrouter &&
2142 	    refcount_read(&removed_entry_port->refcount) > 1)
2143 		return false;
2144 
2145 	if (removed_entry_port->mrouter &&
2146 	    refcount_read(&removed_entry_port->refcount) > 2)
2147 		return false;
2148 
2149 	return true;
2150 }
2151 
/* Release @local_port's reference on @mdb_entry, destroying the whole entry
 * when this was the last meaningful reference. A no-op if the port holds no
 * reference on the entry.
 */
static void
mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_bridge_device *bridge_device,
			  struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
			  bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	/* Avoid a temporary situation in which the MDB entry points to an empty
	 * PGT entry, as otherwise packets will be temporarily dropped instead
	 * of being flooded. Instead, in this situation, call
	 * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
	 * then releases the PGT entry.
	 */
	if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
		mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
					   local_port, force);
	else
		mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
					    force);
}
2177 
/* SWITCHDEV_OBJ_ID_PORT_MDB addition handler. Bridge ports or VLANs that
 * are not offloaded by mlxsw are silently ignored (return 0), since the
 * bridge may notify about entities the driver does not model.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* Creates the entry if it does not exist yet and takes a reference
	 * for this local port.
	 */
	mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
					      mdb->addr, fid_index,
					      mlxsw_sp_port->local_port);
	if (IS_ERR(mdb_entry))
		return PTR_ERR(mdb_entry);

	return 0;
}
2210 
/* Write (@mc_enabled == true) or remove all MDB entries of @bridge_device
 * to/from the device when multicast is toggled on the bridge. On failure,
 * entries already written are rolled back so software and hardware state
 * stay consistent.
 */
static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool mc_enabled)
{
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
		if (err)
			goto err_mdb_entry_write;
	}
	return 0;

err_mdb_entry_write:
	/* Undo only the entries processed before the failure. */
	list_for_each_entry_continue_reverse(mdb_entry,
					     &bridge_device->mdb_list, list)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
	return err;
}
2232 
2233 static void
2234 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
2235 				 struct mlxsw_sp_bridge_port *bridge_port,
2236 				 bool add)
2237 {
2238 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2239 	struct mlxsw_sp_bridge_device *bridge_device;
2240 	u16 local_port = mlxsw_sp_port->local_port;
2241 	struct mlxsw_sp_mdb_entry *mdb_entry;
2242 
2243 	bridge_device = bridge_port->bridge_device;
2244 
2245 	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
2246 		if (add)
2247 			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
2248 							    local_port);
2249 		else
2250 			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
2251 							    local_port);
2252 	}
2253 }
2254 
/* switchdev object-add callback for mlxsw port netdevs. Dispatches VLAN and
 * MDB additions; all other object types are unsupported.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);

		/* The event is emitted before the changes are actually
		 * applied to the bridge. Therefore schedule the respin
		 * call for later, so that the respin logic sees the
		 * updated bridge state.
		 */
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
2287 
/* Remove @vid from @bridge_port: detach the {port, VLAN} from the bridge,
 * restore the PVID (clearing it when the deleted VLAN was the PVID),
 * remove the VLAN from the port and destroy the port-VLAN object.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 proto;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	/* The PVID is set using the bridge's VLAN protocol. */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
2306 
2307 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
2308 				   const struct switchdev_obj_port_vlan *vlan)
2309 {
2310 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2311 	struct net_device *orig_dev = vlan->obj.orig_dev;
2312 	struct mlxsw_sp_bridge_port *bridge_port;
2313 
2314 	if (netif_is_bridge_master(orig_dev))
2315 		return -EOPNOTSUPP;
2316 
2317 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2318 	if (WARN_ON(!bridge_port))
2319 		return -EINVAL;
2320 
2321 	if (!bridge_port->bridge_device->vlan_enabled)
2322 		return 0;
2323 
2324 	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
2325 
2326 	return 0;
2327 }
2328 
/* SWITCHDEV_OBJ_ID_PORT_MDB deletion handler. Ports and VLANs that are not
 * offloaded are silently ignored, matching mlxsw_sp_port_mdb_add().
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* MDB entries are keyed by {MAC, FID}. */
	ether_addr_copy(key.addr, mdb->addr);
	key.fid = fid_index;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
	if (!mdb_entry) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
				  mlxsw_sp_port->local_port, false);
	return 0;
}
2368 
/* Remove @mlxsw_sp_port from all MDB entries of FID @fid_index. The port's
 * mrouter reference (if any) is dropped first and entry removal is forced,
 * since the port is leaving the FID entirely.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
	u16 local_port = mlxsw_sp_port->local_port;

	bridge_device = bridge_port->bridge_device;

	/* _safe variant: mlxsw_sp_mc_mdb_entry_put() may free the entry. */
	list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
				 list) {
		if (mdb_entry->key.fid != fid_index)
			continue;

		if (bridge_port->mrouter)
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
							    mdb_entry,
							    local_port);

		mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
					  local_port, true);
	}
}
2395 
/* switchdev object-delete callback for mlxsw port netdevs. Mirrors
 * mlxsw_sp_port_obj_add(); SPAN respin is scheduled unconditionally since
 * deletions can also change the topology the mirroring logic depends on.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
2420 
2421 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2422 						   u16 lag_id)
2423 {
2424 	struct mlxsw_sp_port *mlxsw_sp_port;
2425 	u64 max_lag_members;
2426 	int i;
2427 
2428 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2429 					     MAX_LAG_MEMBERS);
2430 	for (i = 0; i < max_lag_members; i++) {
2431 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2432 		if (mlxsw_sp_port)
2433 			return mlxsw_sp_port;
2434 	}
2435 	return NULL;
2436 }
2437 
/* Replay the bridge's switchdev objects (VLANs, FDB/MDB entries, etc.)
 * towards @mlxsw_sp_port so the device catches up with pre-existing bridge
 * state. On error, the un-replay notifier is run to undo objects that were
 * already replayed before the failure.
 */
static int
mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = {
		.brport_dev = bridge_port->dev,
		.mlxsw_sp_port = mlxsw_sp_port,
	};
	struct notifier_block *nb;
	int err;

	nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb;
	err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
					   &rso, NULL, nb, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb;
	switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
				     &rso, NULL, nb, extack);
	return err;
}
2464 
2465 static int
2466 mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
2467 				     struct mlxsw_sp_port *mlxsw_sp_port,
2468 				     struct netlink_ext_ack *extack)
2469 {
2470 	if (is_vlan_dev(bridge_port->dev)) {
2471 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
2472 		return -EINVAL;
2473 	}
2474 
2475 	/* Port is no longer usable as a router interface */
2476 	if (mlxsw_sp_port->default_vlan->fid)
2477 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
2478 
2479 	return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
2480 }
2481 
/* .port_join for 802.1Q bridges: defer to the common VLAN-aware join. */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						   extack);
	return err;
}
2491 
/* Common leave path for VLAN-aware bridges: restore the default PVID with
 * the 802.1Q protocol so the port keeps accepting untagged traffic.
 */
static void
mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
2499 
/* .port_leave for 802.1Q bridges: defer to the common VLAN-aware leave. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
2507 
/* Common .vxlan_join for VLAN-aware bridges: bind the 802.1Q FID of @vid to
 * the VXLAN device's VNI and enable NVE tunneling with the given decap
 * @ethertype. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				      const struct net_device *vxlan_dev,
				      u16 vid, u16 ethertype,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
		.ethertype = ethertype,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	/* A FID can only be bound to a single VNI at a time. */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2558 
2559 static int
2560 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2561 				 const struct net_device *vxlan_dev, u16 vid,
2562 				 struct netlink_ext_ack *extack)
2563 {
2564 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2565 						     vid, ETH_P_8021Q, extack);
2566 }
2567 
2568 static struct net_device *
2569 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2570 {
2571 	struct net_device *dev;
2572 	struct list_head *iter;
2573 
2574 	netdev_for_each_lower_dev(br_dev, dev, iter) {
2575 		u16 pvid;
2576 		int err;
2577 
2578 		if (!netif_is_vxlan(dev))
2579 			continue;
2580 
2581 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2582 		if (err || pvid != vid)
2583 			continue;
2584 
2585 		return dev;
2586 	}
2587 
2588 	return NULL;
2589 }
2590 
2591 static struct mlxsw_sp_fid *
2592 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2593 			      u16 vid, struct netlink_ext_ack *extack)
2594 {
2595 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2596 
2597 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2598 }
2599 
2600 static struct mlxsw_sp_fid *
2601 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2602 				 u16 vid)
2603 {
2604 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2605 
2606 	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2607 }
2608 
/* .fid_vid for 802.1Q bridges: map a FID back to the VID it was created
 * from.
 */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2615 
/* Bridge ops for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2624 
2625 static bool
2626 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2627 			   const struct net_device *br_dev)
2628 {
2629 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2630 
2631 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2632 			    list) {
2633 		if (mlxsw_sp_port_vlan->bridge_port &&
2634 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2635 		    br_dev)
2636 			return true;
2637 	}
2638 
2639 	return false;
2640 }
2641 
/* .port_join for VLAN-unaware (802.1D) bridges: attach the port's relevant
 * {port, VLAN} (the VLAN upper's VID, or the default VID) to the bridge and
 * replay existing bridge state. Bridging two VLAN uppers of the same port
 * is rejected since they would share one FID.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;
	int err;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	return err;
}
2682 
2683 static void
2684 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2685 				 struct mlxsw_sp_bridge_port *bridge_port,
2686 				 struct mlxsw_sp_port *mlxsw_sp_port)
2687 {
2688 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2689 	struct net_device *dev = bridge_port->dev;
2690 	u16 vid;
2691 
2692 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2693 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2694 	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2695 		return;
2696 
2697 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2698 }
2699 
2700 static int
2701 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2702 				 const struct net_device *vxlan_dev, u16 vid,
2703 				 struct netlink_ext_ack *extack)
2704 {
2705 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2706 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2707 	struct mlxsw_sp_nve_params params = {
2708 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2709 		.vni = vxlan->cfg.vni,
2710 		.dev = vxlan_dev,
2711 		.ethertype = ETH_P_8021Q,
2712 	};
2713 	struct mlxsw_sp_fid *fid;
2714 	int err;
2715 
2716 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2717 	if (IS_ERR(fid)) {
2718 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2719 		return -EINVAL;
2720 	}
2721 
2722 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2723 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2724 		err = -EINVAL;
2725 		goto err_vni_exists;
2726 	}
2727 
2728 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2729 	if (err)
2730 		goto err_nve_fid_enable;
2731 
2732 	return 0;
2733 
2734 err_nve_fid_enable:
2735 err_vni_exists:
2736 	mlxsw_sp_fid_put(fid);
2737 	return err;
2738 }
2739 
2740 static struct mlxsw_sp_fid *
2741 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2742 			      u16 vid, struct netlink_ext_ack *extack)
2743 {
2744 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2745 
2746 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2747 }
2748 
2749 static struct mlxsw_sp_fid *
2750 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2751 				 u16 vid)
2752 {
2753 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2754 
2755 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2756 	if (vid)
2757 		return NULL;
2758 
2759 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2760 }
2761 
/* .fid_vid for 802.1D bridges: a VLAN-unaware FID has no VID, so always 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2768 
/* Bridge ops for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2777 
/* .port_join for 802.1ad bridges: adjust the port's VLAN classification
 * (see mlxsw_sp_port_vlan_classification_set()) before performing the
 * common VLAN-aware join; the error path restores the previous setting with
 * the flags flipped.
 */
static int
mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
	if (err)
		return err;

	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						   extack);
	if (err)
		goto err_bridge_vlan_aware_port_join;

	return 0;

err_bridge_vlan_aware_port_join:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	return err;
}
2801 
/* .port_leave for 802.1ad bridges: common VLAN-aware leave, then restore
 * the VLAN classification changed by mlxsw_sp_bridge_8021ad_port_join().
 */
static void
mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
}
2810 
2811 static int
2812 mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2813 				  const struct net_device *vxlan_dev, u16 vid,
2814 				  struct netlink_ext_ack *extack)
2815 {
2816 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2817 						     vid, ETH_P_8021AD, extack);
2818 }
2819 
/* Spectrum-1 bridge ops for 802.1ad bridges; FID handling is shared with
 * the 802.1Q implementation.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2828 
/* Spectrum-2 .port_join for 802.1ad bridges: additionally switch the port's
 * egress EtherType to 802.1ad before the common 802.1ad join; the error
 * path restores 802.1Q.
 */
static int
mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct netlink_ext_ack *extack)
{
	int err;

	/* The EtherType of decapsulated packets is determined at the egress
	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
	 * co-exist.
	 */
	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
					       mlxsw_sp_port, extack);
	if (err)
		goto err_bridge_8021ad_port_join;

	return 0;

err_bridge_8021ad_port_join:
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
	return err;
}
2856 
/* Spectrum-2 .port_leave for 802.1ad bridges: common 802.1ad leave, then
 * restore the default 802.1Q egress EtherType.
 */
static void
mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				   struct mlxsw_sp_bridge_port *bridge_port,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
					  mlxsw_sp_port);
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
}
2866 
/* Spectrum-2 bridge ops for 802.1ad bridges; FID handling is shared with
 * the 802.1Q implementation.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2875 
/* Enslave @mlxsw_sp_port (represented by @brport_dev) to bridge @br_dev:
 * take a reference on the bridge port, run the bridge-type-specific join
 * and replay netdevice enslavement events. Returns 0 on success or a
 * negative errno, undoing the join on failure.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
					       extack);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2910 
/* Reverse of mlxsw_sp_port_bridge_join(): run the bridge-type-specific
 * leave, clear port security and drop the bridge port reference. A no-op if
 * the bridge or bridge port is not known to the driver.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_port_security_set(mlxsw_sp_port, false);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2931 
2932 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2933 			       const struct net_device *br_dev,
2934 			       const struct net_device *vxlan_dev, u16 vid,
2935 			       struct netlink_ext_ack *extack)
2936 {
2937 	struct mlxsw_sp_bridge_device *bridge_device;
2938 
2939 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2940 	if (WARN_ON(!bridge_device))
2941 		return -EINVAL;
2942 
2943 	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
2944 					      extack);
2945 }
2946 
/* Unmap @vxlan_dev's VNI from its FID and disable NVE on it.
 *
 * Note the double mlxsw_sp_fid_put(): one put balances the lookup below,
 * the other drops the reference held on behalf of the VxLAN device.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2965 
2966 static void
2967 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2968 				      enum mlxsw_sp_l3proto *proto,
2969 				      union mlxsw_sp_l3addr *addr)
2970 {
2971 	if (vxlan_addr->sa.sa_family == AF_INET) {
2972 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2973 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2974 	} else {
2975 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2976 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2977 	}
2978 }
2979 
2980 static void
2981 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2982 				      const union mlxsw_sp_l3addr *addr,
2983 				      union vxlan_addr *vxlan_addr)
2984 {
2985 	switch (proto) {
2986 	case MLXSW_SP_L3_PROTO_IPV4:
2987 		vxlan_addr->sa.sa_family = AF_INET;
2988 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2989 		break;
2990 	case MLXSW_SP_L3_PROTO_IPV6:
2991 		vxlan_addr->sa.sa_family = AF_INET6;
2992 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2993 		break;
2994 	}
2995 }
2996 
2997 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2998 					      const char *mac,
2999 					      enum mlxsw_sp_l3proto proto,
3000 					      union mlxsw_sp_l3addr *addr,
3001 					      __be32 vni, bool adding)
3002 {
3003 	struct switchdev_notifier_vxlan_fdb_info info;
3004 	struct vxlan_dev *vxlan = netdev_priv(dev);
3005 	enum switchdev_notifier_type type;
3006 
3007 	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
3008 			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
3009 	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
3010 	info.remote_port = vxlan->cfg.dst_port;
3011 	info.remote_vni = vni;
3012 	info.remote_ifindex = 0;
3013 	ether_addr_copy(info.eth_addr, mac);
3014 	info.vni = vni;
3015 	info.offloaded = adding;
3016 	call_switchdev_notifiers(type, dev, &info.info, NULL);
3017 }
3018 
3019 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
3020 					    const char *mac,
3021 					    enum mlxsw_sp_l3proto proto,
3022 					    union mlxsw_sp_l3addr *addr,
3023 					    __be32 vni,
3024 					    bool adding)
3025 {
3026 	if (netif_is_vxlan(dev))
3027 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
3028 						  adding);
3029 }
3030 
3031 static void
3032 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
3033 			    const char *mac, u16 vid,
3034 			    struct net_device *dev, bool offloaded, bool locked)
3035 {
3036 	struct switchdev_notifier_fdb_info info = {};
3037 
3038 	info.addr = mac;
3039 	info.vid = vid;
3040 	info.offloaded = offloaded;
3041 	info.locked = locked;
3042 	call_switchdev_notifiers(type, dev, &info.info, NULL);
3043 }
3044 
/* Process a single learned / aged-out MAC record from an SFN query.
 *
 * Mirrors the hardware event into the device FDB (with an explicit write
 * back) and notifies the bridge. If the record cannot be matched to a
 * known {port, VID} the entry is removed from hardware without notifying
 * anyone (the just_remove path), so the device stops re-reporting it.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port, vid, fid, evid = 0;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	/* VLAN-unaware bridges report VID 0 to the bridge layer. */
	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	evid = mlxsw_sp_port_vlan->vid;

	/* On a locked (secure) port, a learned MAC is not installed in
	 * hardware; the bridge is only told about it, with locked=true.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Unprocessable record: delete it from hardware, skip notification. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3115 
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): handle a learned /
 * aged-out MAC record whose source is a LAG rather than a single port.
 * The same just_remove fallback applies for unmatchable records.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	/* VLAN-unaware bridges report VID 0 to the bridge layer. */
	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

	/* Locked port: report to the bridge only, do not program hardware. */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Unprocessable record: delete it from hardware, skip notification. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3185 
/* Resolve and validate the NVE device behind @fid for a tunnel FDB record.
 *
 * On success fills @nve_dev, @p_vid (bridge VID mapped to the FID) and
 * @p_vni. Returns a negative errno when the record should not be processed:
 * the NVE device is gone/down, learning is disabled on it, or it is not
 * enslaved to a bridge known to the driver.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Only add entries if learning is enabled on the bridge port... */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* ...and, for VxLAN, on the VxLAN device itself. */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
3236 
/* Process a learned / aged-out unicast tunnel record from an SFN query:
 * resolve the underlay IP and NVE device, mirror the entry into the device
 * FDB and notify both the NVE (VxLAN) driver and the bridge.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
						      char *sfn_pl,
						      int rec_index,
						      bool adding)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	char mac[ETH_ALEN];
	u16 fid_index, vid;
	__be32 vni;
	u32 uip;
	int err;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
				       &uip, &sfn_proto);

	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		goto err_fid_lookup;

	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
					      &addr);
	if (err)
		goto err_ip_resolve;

	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
	if (err)
		goto err_fdb_process;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	if (err)
		goto err_fdb_op;

	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);

	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding, false);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_op:
err_fdb_process:
err_ip_resolve:
	mlxsw_sp_fid_put(fid);
err_fid_lookup:
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	 *
	 * NOTE(review): on the err_fid_lookup and err_ip_resolve paths @addr
	 * was never resolved; the removal presumably keys on MAC/FID only —
	 * confirm against the register spec.
	 */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
				       false, true);
}
3301 
3302 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
3303 					    char *sfn_pl, int rec_index)
3304 {
3305 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
3306 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
3307 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3308 						rec_index, true);
3309 		break;
3310 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
3311 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3312 						rec_index, false);
3313 		break;
3314 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
3315 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3316 						    rec_index, true);
3317 		break;
3318 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
3319 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3320 						    rec_index, false);
3321 		break;
3322 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
3323 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3324 							  rec_index, true);
3325 		break;
3326 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
3327 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3328 							  rec_index, false);
3329 		break;
3330 	}
3331 }
3332 
/* Upper bound on SFN register queries per work invocation, so one busy
 * session cannot monopolize RTNL.
 */
#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10

/* Periodic work that polls the device for FDB notification (SFN) records
 * and feeds them to mlxsw_sp_fdb_notify_rec_process().
 *
 * Runs under RTNL. Only rescheduled while at least one bridge is offloaded;
 * when the query budget is exhausted with a full record page still pending,
 * the next run is requested accordingly (see the '!queries' argument to
 * mlxsw_sp_fdb_notify_work_schedule()).
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	bool reschedule = false;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	if (list_empty(&bridge->bridges_list))
		goto out;
	reschedule = true;
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partial page means the queue is drained; stop early. */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	if (!reschedule)
		return;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
3380 
/* Deferred context for a switchdev FDB event: the atomic notifier copies the
 * event data here and a workqueue item processes it later under RTNL.
 * @dev_tracker backs the netdev reference held for the lifetime of @work.
 * The union holds either a bridge FDB event or a VxLAN FDB event, selected
 * by @event.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	netdevice_tracker dev_tracker;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;
	unsigned long event;
};
3391 
/* Handle a bridge FDB add/del that points at a VxLAN device: look up the
 * matching entry in the VxLAN driver's FDB, program (or remove) the tunnel
 * unicast entry in hardware and report the offload state back via the
 * SWITCHDEV_VXLAN_FDB_OFFLOADED / SWITCHDEV_FDB_OFFLOADED notifiers.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal is best-effort; the offloaded=false notification is
		 * sent regardless of the hardware operation's result.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
3440 
/* Validate a deferred bridge FDB event whose target is an NVE (VxLAN)
 * device, resolve its FID/VNI and forward the event to
 * mlxsw_sp_switchdev_bridge_vxlan_fdb_event(). Bails out silently when the
 * event is irrelevant (wrong type, not user-added, device down, not bridged
 * to a device handled by this driver, or no VNI on the FID).
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only offload entries a user added; locally learned ones stay in
	 * software.
	 */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    (!switchdev_work->fdb_info.added_by_user ||
	     switchdev_work->fdb_info.is_local))
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
3491 
/* Workqueue handler for deferred bridge FDB events (see
 * mlxsw_sp_switchdev_event()). Runs under RTNL, programs/removes the FDB
 * entry in hardware and finally releases the resources the notifier took:
 * the copied MAC buffer, the netdev reference and the work item itself.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only offload entries a user added; skip local entries. */
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);
}
3543 
/* Handle a VxLAN FDB entry added in the VxLAN driver.
 *
 * All-zeroes MAC entries describe flood destinations and are programmed as
 * NVE flood IPs; unicast entries are only programmed when the bridge's FDB
 * also points the MAC at the VxLAN device. Offload state is reported back
 * via the SWITCHDEV_*_OFFLOADED notifiers.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true,
				    false);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
3615 
/* Handle a VxLAN FDB entry deleted in the VxLAN driver: remove the matching
 * NVE flood IP (all-zeroes MAC) or tunnel unicast entry from hardware and
 * clear the offload mark. Entries that were never offloaded are ignored.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	if (!vxlan_fdb_info->offloaded)
		return;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeroes MAC entries represent flood destinations. */
	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false,
				    false);

	mlxsw_sp_fid_put(fid);
}
3661 
/* Workqueue handler for deferred VxLAN FDB events. Re-validates the device
 * state under RTNL (it may have changed since the notifier fired), then
 * dispatches to the add/del handler. Always releases the netdev reference
 * and frees the work item.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);
}
3697 
/* Validate a VxLAN FDB notification against the device's offload
 * constraints (default remote port/VNI, no local interface, unicast MAC and
 * remote IP only) and copy it into the deferred work item.
 *
 * Returns 0 when the event was accepted, -EOPNOTSUPP (with extack set)
 * otherwise. Called from atomic notifier context.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;
	struct netlink_ext_ack *extack;

	extack = switchdev_notifier_info_to_extack(info);
	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_vni != cfg->vni ||
	    vxlan_fdb_info->vni != cfg->vni) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
		return -EOPNOTSUPP;
	}
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
		return -EOPNOTSUPP;
	}

	/* Struct copy; the notifier's data is not valid after we return. */
	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
3739 
/* Called under rcu_read_lock() */
/* Switchdev notifier entry point (atomic context, hence GFP_ATOMIC).
 *
 * PORT_ATTR_SET is handled synchronously; FDB events relevant to this
 * driver are copied into a work item and processed later under RTNL.
 * The netdev reference taken here is dropped by the work handler.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's MAC buffer is not valid after we return,
		 * so keep our own copy; freed by the work handler.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3821 
/* Registered with the switchdev notifier chain; entry point for all
 * switchdev events handled by this driver.
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3825 
/* Handle a VLAN object added on a VxLAN bridge port of a VLAN-aware bridge.
 *
 * Decides, based on the PVID / egress-untagged flags and the FID currently
 * mapped to the device's VNI, whether the VNI must be joined to the new
 * VLAN, left, or remapped. The five cases are spelled out inline below.
 * Returns 0 on success or a negative errno (with extack set where useful).
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
						      vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Restore the previous VLAN-to-VNI mapping unmapped above. */
	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
	return err;
}
3911 
3912 static void
3913 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3914 				  struct mlxsw_sp_bridge_device *bridge_device,
3915 				  const struct net_device *vxlan_dev, u16 vid)
3916 {
3917 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3918 	__be32 vni = vxlan->cfg.vni;
3919 	struct mlxsw_sp_fid *fid;
3920 
3921 	if (!netif_running(vxlan_dev))
3922 		return;
3923 
3924 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3925 	if (!fid)
3926 		return;
3927 
3928 	/* A different VLAN than the one mapped to the VNI is deleted */
3929 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3930 		goto out;
3931 
3932 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3933 
3934 out:
3935 	mlxsw_sp_fid_put(fid);
3936 }
3937 
3938 static int
3939 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3940 				   struct switchdev_notifier_port_obj_info *
3941 				   port_obj_info)
3942 {
3943 	struct switchdev_obj_port_vlan *vlan =
3944 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3945 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3946 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3947 	struct mlxsw_sp_bridge_device *bridge_device;
3948 	struct netlink_ext_ack *extack;
3949 	struct mlxsw_sp *mlxsw_sp;
3950 	struct net_device *br_dev;
3951 
3952 	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3953 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3954 	if (!br_dev)
3955 		return 0;
3956 
3957 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3958 	if (!mlxsw_sp)
3959 		return 0;
3960 
3961 	port_obj_info->handled = true;
3962 
3963 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3964 	if (!bridge_device)
3965 		return -EINVAL;
3966 
3967 	if (!bridge_device->vlan_enabled)
3968 		return 0;
3969 
3970 	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3971 						 vxlan_dev, vlan->vid,
3972 						 flag_untagged,
3973 						 flag_pvid, extack);
3974 }
3975 
3976 static void
3977 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3978 				   struct switchdev_notifier_port_obj_info *
3979 				   port_obj_info)
3980 {
3981 	struct switchdev_obj_port_vlan *vlan =
3982 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3983 	struct mlxsw_sp_bridge_device *bridge_device;
3984 	struct mlxsw_sp *mlxsw_sp;
3985 	struct net_device *br_dev;
3986 
3987 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3988 	if (!br_dev)
3989 		return;
3990 
3991 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3992 	if (!mlxsw_sp)
3993 		return;
3994 
3995 	port_obj_info->handled = true;
3996 
3997 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3998 	if (!bridge_device)
3999 		return;
4000 
4001 	if (!bridge_device->vlan_enabled)
4002 		return;
4003 
4004 	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
4005 					  vlan->vid);
4006 }
4007 
4008 static int
4009 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
4010 					struct switchdev_notifier_port_obj_info *
4011 					port_obj_info)
4012 {
4013 	int err = 0;
4014 
4015 	switch (port_obj_info->obj->id) {
4016 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4017 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
4018 							 port_obj_info);
4019 		break;
4020 	default:
4021 		break;
4022 	}
4023 
4024 	return err;
4025 }
4026 
4027 static void
4028 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
4029 					struct switchdev_notifier_port_obj_info *
4030 					port_obj_info)
4031 {
4032 	switch (port_obj_info->obj->id) {
4033 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4034 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
4035 		break;
4036 	default:
4037 		break;
4038 	}
4039 }
4040 
4041 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
4042 					     unsigned long event, void *ptr)
4043 {
4044 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
4045 	int err = 0;
4046 
4047 	switch (event) {
4048 	case SWITCHDEV_PORT_OBJ_ADD:
4049 		if (netif_is_vxlan(dev))
4050 			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
4051 		else
4052 			err = switchdev_handle_port_obj_add(dev, ptr,
4053 							mlxsw_sp_port_dev_check,
4054 							mlxsw_sp_port_obj_add);
4055 		return notifier_from_errno(err);
4056 	case SWITCHDEV_PORT_OBJ_DEL:
4057 		if (netif_is_vxlan(dev))
4058 			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
4059 		else
4060 			err = switchdev_handle_port_obj_del(dev, ptr,
4061 							mlxsw_sp_port_dev_check,
4062 							mlxsw_sp_port_obj_del);
4063 		return notifier_from_errno(err);
4064 	case SWITCHDEV_PORT_ATTR_SET:
4065 		err = switchdev_handle_port_attr_set(dev, ptr,
4066 						     mlxsw_sp_port_dev_check,
4067 						     mlxsw_sp_port_attr_set);
4068 		return notifier_from_errno(err);
4069 	}
4070 
4071 	return NOTIFY_DONE;
4072 }
4073 
/* Notifier block registered on the blocking switchdev notifier chain */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
4077 
/* Return the STP state stored for the given bridge port */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
4083 
4084 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
4085 {
4086 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
4087 	struct notifier_block *nb;
4088 	int err;
4089 
4090 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
4091 	if (err) {
4092 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
4093 		return err;
4094 	}
4095 
4096 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4097 	if (err) {
4098 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
4099 		return err;
4100 	}
4101 
4102 	nb = &mlxsw_sp_switchdev_blocking_notifier;
4103 	err = register_switchdev_blocking_notifier(nb);
4104 	if (err) {
4105 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
4106 		goto err_register_switchdev_blocking_notifier;
4107 	}
4108 
4109 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
4110 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
4111 	return 0;
4112 
4113 err_register_switchdev_blocking_notifier:
4114 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4115 	return err;
4116 }
4117 
4118 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
4119 {
4120 	struct notifier_block *nb;
4121 
4122 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
4123 
4124 	nb = &mlxsw_sp_switchdev_blocking_notifier;
4125 	unregister_switchdev_blocking_notifier(nb);
4126 
4127 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4128 }
4129 
/* Spectrum-1: install the ASIC-specific 802.1ad bridge operations */
static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
}

const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
	.init	= mlxsw_sp1_switchdev_init,
};
4138 
/* Spectrum-2 and later: install the ASIC-specific 802.1ad bridge operations */
static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
}

const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
	.init	= mlxsw_sp2_switchdev_init,
};
4147 
4148 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
4149 {
4150 	struct mlxsw_sp_bridge *bridge;
4151 
4152 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
4153 	if (!bridge)
4154 		return -ENOMEM;
4155 	mlxsw_sp->bridge = bridge;
4156 	bridge->mlxsw_sp = mlxsw_sp;
4157 
4158 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
4159 
4160 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
4161 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
4162 
4163 	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
4164 
4165 	return mlxsw_sp_fdb_init(mlxsw_sp);
4166 }
4167 
4168 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
4169 {
4170 	mlxsw_sp_fdb_fini(mlxsw_sp);
4171 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
4172 	kfree(mlxsw_sp->bridge);
4173 }
4174 
4175