1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
28 struct mlxsw_sp_bridge {
29 	struct mlxsw_sp *mlxsw_sp;
30 	struct {
31 		struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 		unsigned int interval; /* ms */
34 	} fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 	u32 ageing_time;
39 	bool vlan_enabled_exists;
40 	struct list_head bridges_list;
41 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
45 };
46 
47 struct mlxsw_sp_bridge_device {
48 	struct net_device *dev;
49 	struct list_head list;
50 	struct list_head ports_list;
51 	struct list_head mids_list;
52 	u8 vlan_enabled:1,
53 	   multicast_enabled:1,
54 	   mrouter:1;
55 	const struct mlxsw_sp_bridge_ops *ops;
56 };
57 
58 struct mlxsw_sp_bridge_port {
59 	struct net_device *dev;
60 	struct mlxsw_sp_bridge_device *bridge_device;
61 	struct list_head list;
62 	struct list_head vlans_list;
63 	unsigned int ref_count;
64 	u8 stp_state;
65 	unsigned long flags;
66 	bool mrouter;
67 	bool lagged;
68 	union {
69 		u16 lag_id;
70 		u16 system_port;
71 	};
72 };
73 
74 struct mlxsw_sp_bridge_vlan {
75 	struct list_head list;
76 	struct list_head port_vlan_list;
77 	u16 vid;
78 };
79 
80 struct mlxsw_sp_bridge_ops {
81 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
82 			 struct mlxsw_sp_bridge_port *bridge_port,
83 			 struct mlxsw_sp_port *mlxsw_sp_port,
84 			 struct netlink_ext_ack *extack);
85 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
86 			   struct mlxsw_sp_bridge_port *bridge_port,
87 			   struct mlxsw_sp_port *mlxsw_sp_port);
88 	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
89 			  const struct net_device *vxlan_dev, u16 vid,
90 			  struct netlink_ext_ack *extack);
91 	struct mlxsw_sp_fid *
92 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
93 			   u16 vid, struct netlink_ext_ack *extack);
94 	struct mlxsw_sp_fid *
95 		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
96 			      u16 vid);
97 	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
98 		       const struct mlxsw_sp_fid *fid);
99 };
100 
101 static int
102 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
103 			       struct mlxsw_sp_bridge_port *bridge_port,
104 			       u16 fid_index);
105 
106 static void
107 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
108 			       struct mlxsw_sp_bridge_port *bridge_port);
109 
110 static void
111 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
112 				   struct mlxsw_sp_bridge_device
113 				   *bridge_device);
114 
115 static void
116 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
117 				 struct mlxsw_sp_bridge_port *bridge_port,
118 				 bool add);
119 
120 static struct mlxsw_sp_bridge_device *
121 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
122 			    const struct net_device *br_dev)
123 {
124 	struct mlxsw_sp_bridge_device *bridge_device;
125 
126 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
127 		if (bridge_device->dev == br_dev)
128 			return bridge_device;
129 
130 	return NULL;
131 }
132 
133 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
134 					 const struct net_device *br_dev)
135 {
136 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
137 }
138 
139 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
140 						    struct netdev_nested_priv *priv)
141 {
142 	struct mlxsw_sp *mlxsw_sp = priv->data;
143 
144 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
145 	return 0;
146 }
147 
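/* Destroy the RIF associated with the bridge device itself as well as the
 * RIFs of any upper devices stacked on top of it (e.g., VLAN uppers).
 */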
148 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
149 						struct net_device *dev)
150 {
151 	struct netdev_nested_priv priv = {
152 		.data = (void *)mlxsw_sp,
153 	};
154 
155 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
156 	netdev_walk_all_upper_dev_rcu(dev,
157 				      mlxsw_sp_bridge_device_upper_rif_destroy,
158 				      &priv);
159 }
160 
161 static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
162 					     struct net_device *br_dev,
163 					     struct netlink_ext_ack *extack)
164 {
165 	struct net_device *dev, *stop_dev;
166 	struct list_head *iter;
167 	int err;
168 
169 	netdev_for_each_lower_dev(br_dev, dev, iter) {
170 		if (netif_is_vxlan(dev) && netif_running(dev)) {
171 			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
172 							 br_dev, dev, 0,
173 							 extack);
174 			if (err) {
175 				stop_dev = dev;
176 				goto err_vxlan_join;
177 			}
178 		}
179 	}
180 
181 	return 0;
182 
183 err_vxlan_join:
184 	netdev_for_each_lower_dev(br_dev, dev, iter) {
185 		if (netif_is_vxlan(dev) && netif_running(dev)) {
186 			if (stop_dev == dev)
187 				break;
188 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
189 		}
190 	}
191 	return err;
192 }
193 
194 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
195 					      struct net_device *br_dev)
196 {
197 	struct net_device *dev;
198 	struct list_head *iter;
199 
200 	netdev_for_each_lower_dev(br_dev, dev, iter) {
201 		if (netif_is_vxlan(dev) && netif_running(dev))
202 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
203 	}
204 }
205 
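/* Create the driver representation of a bridge. Only a single VLAN-aware
 * bridge is supported. VLAN-aware bridges are bound to the 802.1Q or 802.1ad
 * ops according to their VLAN protocol, VLAN-unaware bridges to the 802.1D
 * ops. VXLAN devices already enslaved to the bridge are replayed afterwards.
 */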
206 static struct mlxsw_sp_bridge_device *
207 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
208 			      struct net_device *br_dev,
209 			      struct netlink_ext_ack *extack)
210 {
211 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
212 	struct mlxsw_sp_bridge_device *bridge_device;
213 	bool vlan_enabled = br_vlan_enabled(br_dev);
214 	int err;
215 
216 	if (vlan_enabled && bridge->vlan_enabled_exists) {
217 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
218 		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
219 		return ERR_PTR(-EINVAL);
220 	}
221 
222 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
223 	if (!bridge_device)
224 		return ERR_PTR(-ENOMEM);
225 
226 	bridge_device->dev = br_dev;
227 	bridge_device->vlan_enabled = vlan_enabled;
228 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
229 	bridge_device->mrouter = br_multicast_router(br_dev);
230 	INIT_LIST_HEAD(&bridge_device->ports_list);
231 	if (vlan_enabled) {
232 		u16 proto;
233 
234 		bridge->vlan_enabled_exists = true;
235 		br_vlan_get_proto(br_dev, &proto);
236 		if (proto == ETH_P_8021AD)
237 			bridge_device->ops = bridge->bridge_8021ad_ops;
238 		else
239 			bridge_device->ops = bridge->bridge_8021q_ops;
240 	} else {
241 		bridge_device->ops = bridge->bridge_8021d_ops;
242 	}
243 	INIT_LIST_HEAD(&bridge_device->mids_list);
244 	list_add(&bridge_device->list, &bridge->bridges_list);
245 
246 	/* It is possible we already have VXLAN devices enslaved to the bridge,
247 	 * in which case we need to replay their configuration as if they were
248 	 * just now enslaved to the bridge.
249 	 */
250 	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
251 	if (err)
252 		goto err_vxlan_init;
253 
254 	return bridge_device;
255 
256 err_vxlan_init:
257 	list_del(&bridge_device->list);
258 	if (bridge_device->vlan_enabled)
259 		bridge->vlan_enabled_exists = false;
260 	kfree(bridge_device);
261 	return ERR_PTR(err);
262 }
263 
264 static void
265 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
266 			       struct mlxsw_sp_bridge_device *bridge_device)
267 {
268 	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
269 	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
270 					    bridge_device->dev);
271 	list_del(&bridge_device->list);
272 	if (bridge_device->vlan_enabled)
273 		bridge->vlan_enabled_exists = false;
274 	WARN_ON(!list_empty(&bridge_device->ports_list));
275 	WARN_ON(!list_empty(&bridge_device->mids_list));
276 	kfree(bridge_device);
277 }
278 
279 static struct mlxsw_sp_bridge_device *
280 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
281 			   struct net_device *br_dev,
282 			   struct netlink_ext_ack *extack)
283 {
284 	struct mlxsw_sp_bridge_device *bridge_device;
285 
286 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
287 	if (bridge_device)
288 		return bridge_device;
289 
290 	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
291 }
292 
293 static void
294 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
295 			   struct mlxsw_sp_bridge_device *bridge_device)
296 {
297 	if (list_empty(&bridge_device->ports_list))
298 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
299 }
300 
301 static struct mlxsw_sp_bridge_port *
302 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
303 			    const struct net_device *brport_dev)
304 {
305 	struct mlxsw_sp_bridge_port *bridge_port;
306 
307 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
308 		if (bridge_port->dev == brport_dev)
309 			return bridge_port;
310 	}
311 
312 	return NULL;
313 }
314 
315 struct mlxsw_sp_bridge_port *
316 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
317 			  struct net_device *brport_dev)
318 {
319 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
320 	struct mlxsw_sp_bridge_device *bridge_device;
321 
322 	if (!br_dev)
323 		return NULL;
324 
325 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
326 	if (!bridge_device)
327 		return NULL;
328 
329 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
330 }
331 
332 static struct mlxsw_sp_bridge_port *
333 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
334 			    struct net_device *brport_dev)
335 {
336 	struct mlxsw_sp_bridge_port *bridge_port;
337 	struct mlxsw_sp_port *mlxsw_sp_port;
338 
339 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
340 	if (!bridge_port)
341 		return NULL;
342 
343 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
344 	bridge_port->lagged = mlxsw_sp_port->lagged;
345 	if (bridge_port->lagged)
346 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
347 	else
348 		bridge_port->system_port = mlxsw_sp_port->local_port;
349 	bridge_port->dev = brport_dev;
350 	bridge_port->bridge_device = bridge_device;
351 	bridge_port->stp_state = BR_STATE_DISABLED;
352 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
353 			     BR_MCAST_FLOOD;
354 	INIT_LIST_HEAD(&bridge_port->vlans_list);
355 	list_add(&bridge_port->list, &bridge_device->ports_list);
356 	bridge_port->ref_count = 1;
357 
358 	return bridge_port;
359 }
360 
361 static void
362 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
363 {
364 	list_del(&bridge_port->list);
365 	WARN_ON(!list_empty(&bridge_port->vlans_list));
366 	kfree(bridge_port);
367 }
368 
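/* Look up the bridge port for 'brport_dev' and take a reference, creating the
 * bridge port (and its bridge device, if needed) on first use.
 */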
369 static struct mlxsw_sp_bridge_port *
370 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
371 			 struct net_device *brport_dev,
372 			 struct netlink_ext_ack *extack)
373 {
374 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
375 	struct mlxsw_sp_bridge_device *bridge_device;
376 	struct mlxsw_sp_bridge_port *bridge_port;
377 	int err;
378 
379 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
380 	if (bridge_port) {
381 		bridge_port->ref_count++;
382 		return bridge_port;
383 	}
384 
385 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
386 	if (IS_ERR(bridge_device))
387 		return ERR_CAST(bridge_device);
388 
389 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
390 	if (!bridge_port) {
391 		err = -ENOMEM;
392 		goto err_bridge_port_create;
393 	}
394 
395 	return bridge_port;
396 
397 err_bridge_port_create:
398 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
399 	return ERR_PTR(err);
400 }
401 
402 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
403 				     struct mlxsw_sp_bridge_port *bridge_port)
404 {
405 	struct mlxsw_sp_bridge_device *bridge_device;
406 
407 	if (--bridge_port->ref_count != 0)
408 		return;
409 	bridge_device = bridge_port->bridge_device;
410 	mlxsw_sp_bridge_port_destroy(bridge_port);
411 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
412 }
413 
414 static struct mlxsw_sp_port_vlan *
415 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
416 				  const struct mlxsw_sp_bridge_device *
417 				  bridge_device,
418 				  u16 vid)
419 {
420 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
421 
422 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
423 			    list) {
424 		if (!mlxsw_sp_port_vlan->bridge_port)
425 			continue;
426 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
427 		    bridge_device)
428 			continue;
429 		if (bridge_device->vlan_enabled &&
430 		    mlxsw_sp_port_vlan->vid != vid)
431 			continue;
432 		return mlxsw_sp_port_vlan;
433 	}
434 
435 	return NULL;
436 }
437 
438 static struct mlxsw_sp_port_vlan *
439 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
440 			       u16 fid_index)
441 {
442 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
443 
444 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
445 			    list) {
446 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
447 
448 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
449 			return mlxsw_sp_port_vlan;
450 	}
451 
452 	return NULL;
453 }
454 
455 static struct mlxsw_sp_bridge_vlan *
456 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
457 			  u16 vid)
458 {
459 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
460 
461 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
462 		if (bridge_vlan->vid == vid)
463 			return bridge_vlan;
464 	}
465 
466 	return NULL;
467 }
468 
469 static struct mlxsw_sp_bridge_vlan *
470 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
471 {
472 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
473 
474 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
475 	if (!bridge_vlan)
476 		return NULL;
477 
478 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
479 	bridge_vlan->vid = vid;
480 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
481 
482 	return bridge_vlan;
483 }
484 
485 static void
486 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
487 {
488 	list_del(&bridge_vlan->list);
489 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
490 	kfree(bridge_vlan);
491 }
492 
493 static struct mlxsw_sp_bridge_vlan *
494 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
495 {
496 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
497 
498 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
499 	if (bridge_vlan)
500 		return bridge_vlan;
501 
502 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
503 }
504 
505 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
506 {
507 	if (list_empty(&bridge_vlan->port_vlan_list))
508 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
509 }
510 
511 static int
512 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
513 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
514 				  u8 state)
515 {
516 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
517 
518 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
519 			    bridge_vlan_node) {
520 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
521 			continue;
522 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
523 						 bridge_vlan->vid, state);
524 	}
525 
526 	return 0;
527 }
528 
529 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
530 					    struct net_device *orig_dev,
531 					    u8 state)
532 {
533 	struct mlxsw_sp_bridge_port *bridge_port;
534 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
535 	int err;
536 
537 	/* It's possible we failed to enslave the port, yet this
538 	 * operation is executed due to it being deferred.
539 	 */
540 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
541 						orig_dev);
542 	if (!bridge_port)
543 		return 0;
544 
545 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
546 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
547 							bridge_vlan, state);
548 		if (err)
549 			goto err_port_bridge_vlan_stp_set;
550 	}
551 
552 	bridge_port->stp_state = state;
553 
554 	return 0;
555 
556 err_port_bridge_vlan_stp_set:
557 	list_for_each_entry_continue_reverse(bridge_vlan,
558 					     &bridge_port->vlans_list, list)
559 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
560 						  bridge_port->stp_state);
561 	return err;
562 }
563 
564 static int
565 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
566 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
567 				    enum mlxsw_sp_flood_type packet_type,
568 				    bool member)
569 {
570 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
571 
572 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
573 			    bridge_vlan_node) {
574 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
575 			continue;
576 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
577 					      packet_type,
578 					      mlxsw_sp_port->local_port,
579 					      member);
580 	}
581 
582 	return 0;
583 }
584 
585 static int
586 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
587 				     struct mlxsw_sp_bridge_port *bridge_port,
588 				     enum mlxsw_sp_flood_type packet_type,
589 				     bool member)
590 {
591 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
592 	int err;
593 
594 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
595 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
596 							  bridge_vlan,
597 							  packet_type,
598 							  member);
599 		if (err)
600 			goto err_port_bridge_vlan_flood_set;
601 	}
602 
603 	return 0;
604 
605 err_port_bridge_vlan_flood_set:
606 	list_for_each_entry_continue_reverse(bridge_vlan,
607 					     &bridge_port->vlans_list, list)
608 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
609 						    packet_type, !member);
610 	return err;
611 }
612 
613 static int
614 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
615 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
616 				       bool set)
617 {
618 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
619 	u16 vid = bridge_vlan->vid;
620 
621 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
622 			    bridge_vlan_node) {
623 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
624 			continue;
625 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
626 	}
627 
628 	return 0;
629 }
630 
631 static int
632 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
633 				  struct mlxsw_sp_bridge_port *bridge_port,
634 				  bool set)
635 {
636 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
637 	int err;
638 
639 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
640 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
641 							     bridge_vlan, set);
642 		if (err)
643 			goto err_port_bridge_vlan_learning_set;
644 	}
645 
646 	return 0;
647 
648 err_port_bridge_vlan_learning_set:
649 	list_for_each_entry_continue_reverse(bridge_vlan,
650 					     &bridge_port->vlans_list, list)
651 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
652 						       bridge_vlan, !set);
653 	return err;
654 }
655 
656 static int
657 mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
658 				    struct switchdev_brport_flags flags)
659 {
660 	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
661 		return -EINVAL;
662 
663 	return 0;
664 }
665 
666 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
667 					   struct net_device *orig_dev,
668 					   struct switchdev_brport_flags flags)
669 {
670 	struct mlxsw_sp_bridge_port *bridge_port;
671 	int err;
672 
673 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
674 						orig_dev);
675 	if (!bridge_port)
676 		return 0;
677 
678 	if (flags.mask & BR_FLOOD) {
679 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
680 							   bridge_port,
681 							   MLXSW_SP_FLOOD_TYPE_UC,
682 							   flags.val & BR_FLOOD);
683 		if (err)
684 			return err;
685 	}
686 
687 	if (flags.mask & BR_LEARNING) {
688 		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
689 							bridge_port,
690 							flags.val & BR_LEARNING);
691 		if (err)
692 			return err;
693 	}
694 
695 	if (bridge_port->bridge_device->multicast_enabled)
696 		goto out;
697 
698 	if (flags.mask & BR_MCAST_FLOOD) {
699 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
700 							   bridge_port,
701 							   MLXSW_SP_FLOOD_TYPE_MC,
702 							   flags.val & BR_MCAST_FLOOD);
703 		if (err)
704 			return err;
705 	}
706 
707 out:
708 	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
709 	return 0;
710 }
711 
712 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
713 {
714 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
715 	int err;
716 
717 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
718 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
719 	if (err)
720 		return err;
721 	mlxsw_sp->bridge->ageing_time = ageing_time;
722 	return 0;
723 }
724 
725 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
726 					    unsigned long ageing_clock_t)
727 {
728 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
729 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
730 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
731 
732 	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
733 	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
734 		return -ERANGE;
735 
736 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
737 }
738 
739 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
740 					  struct net_device *orig_dev,
741 					  bool vlan_enabled)
742 {
743 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
744 	struct mlxsw_sp_bridge_device *bridge_device;
745 
746 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
747 	if (WARN_ON(!bridge_device))
748 		return -EINVAL;
749 
750 	if (bridge_device->vlan_enabled == vlan_enabled)
751 		return 0;
752 
753 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
754 	return -EINVAL;
755 }
756 
757 static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
758 						struct net_device *orig_dev,
759 						u16 vlan_proto)
760 {
761 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
762 	struct mlxsw_sp_bridge_device *bridge_device;
763 
764 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
765 	if (WARN_ON(!bridge_device))
766 		return -EINVAL;
767 
768 	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
769 	return -EINVAL;
770 }
771 
772 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
773 					  struct net_device *orig_dev,
774 					  bool is_port_mrouter)
775 {
776 	struct mlxsw_sp_bridge_port *bridge_port;
777 	int err;
778 
779 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
780 						orig_dev);
781 	if (!bridge_port)
782 		return 0;
783 
784 	if (!bridge_port->bridge_device->multicast_enabled)
785 		goto out;
786 
787 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
788 						   MLXSW_SP_FLOOD_TYPE_MC,
789 						   is_port_mrouter);
790 	if (err)
791 		return err;
792 
793 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
794 					 is_port_mrouter);
795 out:
796 	bridge_port->mrouter = is_port_mrouter;
797 	return 0;
798 }
799 
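/* Multicast flooding towards a bridge port is determined by the port's
 * mrouter flag when multicast snooping is enabled on the bridge, and by the
 * BR_MCAST_FLOOD flag otherwise.
 */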
800 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
801 {
802 	const struct mlxsw_sp_bridge_device *bridge_device;
803 
804 	bridge_device = bridge_port->bridge_device;
805 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
806 					bridge_port->flags & BR_MCAST_FLOOD;
807 }
808 
809 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
810 					 struct net_device *orig_dev,
811 					 bool mc_disabled)
812 {
813 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
814 	struct mlxsw_sp_bridge_device *bridge_device;
815 	struct mlxsw_sp_bridge_port *bridge_port;
816 	int err;
817 
818 	/* It's possible we failed to enslave the port, yet this
819 	 * operation is executed due to it being deferred.
820 	 */
821 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
822 	if (!bridge_device)
823 		return 0;
824 
825 	if (bridge_device->multicast_enabled != !mc_disabled) {
826 		bridge_device->multicast_enabled = !mc_disabled;
827 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
828 						   bridge_device);
829 	}
830 
831 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
832 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
833 		bool member = mlxsw_sp_mc_flood(bridge_port);
834 
835 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
836 							   bridge_port,
837 							   packet_type, member);
838 		if (err)
839 			return err;
840 	}
841 
842 	bridge_device->multicast_enabled = !mc_disabled;
843 
844 	return 0;
845 }
846 
847 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
848 					 u16 mid_idx, bool add)
849 {
850 	char *smid_pl;
851 	int err;
852 
853 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
854 	if (!smid_pl)
855 		return -ENOMEM;
856 
857 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
858 			    mlxsw_sp_router_port(mlxsw_sp), add);
859 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
860 	kfree(smid_pl);
861 	return err;
862 }
863 
864 static void
865 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
866 				   struct mlxsw_sp_bridge_device *bridge_device,
867 				   bool add)
868 {
869 	struct mlxsw_sp_mid *mid;
870 
871 	list_for_each_entry(mid, &bridge_device->mids_list, list)
872 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
873 }
874 
875 static int
876 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
877 				  struct net_device *orig_dev,
878 				  bool is_mrouter)
879 {
880 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
881 	struct mlxsw_sp_bridge_device *bridge_device;
882 
883 	/* It's possible we failed to enslave the port, yet this
884 	 * operation is executed due to it being deferred.
885 	 */
886 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
887 	if (!bridge_device)
888 		return 0;
889 
890 	if (bridge_device->mrouter != is_mrouter)
891 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
892 						   is_mrouter);
893 	bridge_device->mrouter = is_mrouter;
894 	return 0;
895 }
896 
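/* switchdev attribute handler. Some attributes arrive deferred, possibly
 * after enslavement failed, so the helpers silently ignore bridge ports that
 * were never offloaded.
 */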
897 static int mlxsw_sp_port_attr_set(struct net_device *dev,
898 				  const struct switchdev_attr *attr,
899 				  struct netlink_ext_ack *extack)
900 {
901 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
902 	int err;
903 
904 	switch (attr->id) {
905 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
906 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
907 						       attr->orig_dev,
908 						       attr->u.stp_state);
909 		break;
910 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
911 		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
912 							  attr->u.brport_flags);
913 		break;
914 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
915 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
916 						      attr->orig_dev,
917 						      attr->u.brport_flags);
918 		break;
919 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
920 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
921 						       attr->u.ageing_time);
922 		break;
923 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
924 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
925 						     attr->orig_dev,
926 						     attr->u.vlan_filtering);
927 		break;
928 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
929 		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
930 							   attr->orig_dev,
931 							   attr->u.vlan_protocol);
932 		break;
933 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
934 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
935 						     attr->orig_dev,
936 						     attr->u.mrouter);
937 		break;
938 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
939 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
940 						    attr->orig_dev,
941 						    attr->u.mc_disabled);
942 		break;
943 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
944 		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
945 							attr->orig_dev,
946 							attr->u.mrouter);
947 		break;
948 	default:
949 		err = -EOPNOTSUPP;
950 		break;
951 	}
952 
953 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
954 
955 	return err;
956 }
957 
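/* Map the {Port, VID} to the FID provided by the bridge ops and configure
 * unicast, multicast and broadcast flooding for the local port according to
 * the bridge port's flags.
 */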
958 static int
959 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
960 			    struct mlxsw_sp_bridge_port *bridge_port,
961 			    struct netlink_ext_ack *extack)
962 {
963 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
964 	struct mlxsw_sp_bridge_device *bridge_device;
965 	u8 local_port = mlxsw_sp_port->local_port;
966 	u16 vid = mlxsw_sp_port_vlan->vid;
967 	struct mlxsw_sp_fid *fid;
968 	int err;
969 
970 	bridge_device = bridge_port->bridge_device;
971 	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
972 	if (IS_ERR(fid))
973 		return PTR_ERR(fid);
974 
975 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
976 				     bridge_port->flags & BR_FLOOD);
977 	if (err)
978 		goto err_fid_uc_flood_set;
979 
980 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
981 				     mlxsw_sp_mc_flood(bridge_port));
982 	if (err)
983 		goto err_fid_mc_flood_set;
984 
985 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
986 				     true);
987 	if (err)
988 		goto err_fid_bc_flood_set;
989 
990 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
991 	if (err)
992 		goto err_fid_port_vid_map;
993 
994 	mlxsw_sp_port_vlan->fid = fid;
995 
996 	return 0;
997 
998 err_fid_port_vid_map:
999 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1000 err_fid_bc_flood_set:
1001 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1002 err_fid_mc_flood_set:
1003 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1004 err_fid_uc_flood_set:
1005 	mlxsw_sp_fid_put(fid);
1006 	return err;
1007 }
1008 
1009 static void
1010 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1011 {
1012 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1013 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1014 	u8 local_port = mlxsw_sp_port->local_port;
1015 	u16 vid = mlxsw_sp_port_vlan->vid;
1016 
1017 	mlxsw_sp_port_vlan->fid = NULL;
1018 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
1019 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1020 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1021 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1022 	mlxsw_sp_fid_put(fid);
1023 }
1024 
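/* Determine the port's new PVID: the configured VID if it is a PVID, zero
 * (untagged packets disallowed) if the configured VID is the current PVID but
 * is no longer marked as such, or the current PVID otherwise.
 */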
1025 static u16
1026 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1027 			     u16 vid, bool is_pvid)
1028 {
1029 	if (is_pvid)
1030 		return vid;
1031 	else if (mlxsw_sp_port->pvid == vid)
1032 		return 0;	/* Dis-allow untagged packets */
1033 	else
1034 		return mlxsw_sp_port->pvid;
1035 }
1036 
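/* Bind a {Port, VID} to its bridge port: join the FID, apply the bridge
 * port's learning and STP state to the VID and link the port-VLAN into the
 * bridge VLAN entry.
 */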
1037 static int
1038 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1039 			       struct mlxsw_sp_bridge_port *bridge_port,
1040 			       struct netlink_ext_ack *extack)
1041 {
1042 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1043 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1044 	u16 vid = mlxsw_sp_port_vlan->vid;
1045 	int err;
1046 
1047 	/* No need to continue if only VLAN flags were changed */
1048 	if (mlxsw_sp_port_vlan->bridge_port)
1049 		return 0;
1050 
1051 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
1052 					  extack);
1053 	if (err)
1054 		return err;
1055 
1056 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
1057 					     bridge_port->flags & BR_LEARNING);
1058 	if (err)
1059 		goto err_port_vid_learning_set;
1060 
1061 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1062 					bridge_port->stp_state);
1063 	if (err)
1064 		goto err_port_vid_stp_set;
1065 
1066 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1067 	if (!bridge_vlan) {
1068 		err = -ENOMEM;
1069 		goto err_bridge_vlan_get;
1070 	}
1071 
1072 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1073 		 &bridge_vlan->port_vlan_list);
1074 
1075 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1076 				 bridge_port->dev, extack);
1077 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
1078 
1079 	return 0;
1080 
1081 err_bridge_vlan_get:
1082 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1083 err_port_vid_stp_set:
1084 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1085 err_port_vid_learning_set:
1086 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1087 	return err;
1088 }
1089 
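/* Inverse of mlxsw_sp_port_vlan_bridge_join(): unlink the {Port, VID} from
 * its bridge port. FDB entries of the bridge port are flushed for the FID
 * when the last port-VLAN using it leaves, and its MDB entries when its last
 * VLAN is removed.
 */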
1090 void
1091 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1092 {
1093 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1094 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1095 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1096 	struct mlxsw_sp_bridge_port *bridge_port;
1097 	u16 vid = mlxsw_sp_port_vlan->vid;
1098 	bool last_port, last_vlan;
1099 
1100 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1101 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1102 		return;
1103 
1104 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1105 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1106 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1107 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1108 
1109 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1110 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1111 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1112 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1113 	if (last_port)
1114 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1115 					       bridge_port,
1116 					       mlxsw_sp_fid_index(fid));
1117 	if (last_vlan)
1118 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1119 
1120 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1121 
1122 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1123 	mlxsw_sp_port_vlan->bridge_port = NULL;
1124 }
1125 
1126 static int
1127 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1128 			      struct mlxsw_sp_bridge_port *bridge_port,
1129 			      u16 vid, bool is_untagged, bool is_pvid,
1130 			      struct netlink_ext_ack *extack)
1131 {
1132 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1133 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1134 	u16 old_pvid = mlxsw_sp_port->pvid;
1135 	u16 proto;
1136 	int err;
1137 
1138 	/* The only valid scenario in which a port-vlan already exists is if
1139 	 * the VLAN flags were changed and the port-vlan is associated with the
1140 	 * correct bridge port
1141 	 */
1142 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1143 	if (mlxsw_sp_port_vlan &&
1144 	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
1145 		return -EEXIST;
1146 
1147 	if (!mlxsw_sp_port_vlan) {
1148 		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1149 							       vid);
1150 		if (IS_ERR(mlxsw_sp_port_vlan))
1151 			return PTR_ERR(mlxsw_sp_port_vlan);
1152 	}
1153 
1154 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1155 				     is_untagged);
1156 	if (err)
1157 		goto err_port_vlan_set;
1158 
1159 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1160 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1161 	if (err)
1162 		goto err_port_pvid_set;
1163 
1164 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
1165 					     extack);
1166 	if (err)
1167 		goto err_port_vlan_bridge_join;
1168 
1169 	return 0;
1170 
1171 err_port_vlan_bridge_join:
1172 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
1173 err_port_pvid_set:
1174 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1175 err_port_vlan_set:
1176 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1177 	return err;
1178 }
1179 
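/* If the bridge has a router interface (RIF), its PVID is the VLAN used for
 * routing, so changes that would modify or remove this PVID are rejected.
 */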
1180 static int
1181 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1182 				const struct net_device *br_dev,
1183 				const struct switchdev_obj_port_vlan *vlan)
1184 {
1185 	u16 pvid;
1186 
1187 	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1188 	if (!pvid)
1189 		return 0;
1190 
1191 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1192 		if (vlan->vid != pvid) {
1193 			netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1194 			return -EBUSY;
1195 		}
1196 	} else {
1197 		if (vlan->vid == pvid) {
1198 			netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1199 			return -EBUSY;
1200 		}
1201 	}
1202 
1203 	return 0;
1204 }
1205 
1206 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1207 				   const struct switchdev_obj_port_vlan *vlan,
1208 				   struct netlink_ext_ack *extack)
1209 {
1210 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1211 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1212 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1213 	struct net_device *orig_dev = vlan->obj.orig_dev;
1214 	struct mlxsw_sp_bridge_port *bridge_port;
1215 
1216 	if (netif_is_bridge_master(orig_dev)) {
1217 		int err = 0;
1218 
1219 		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1220 		    br_vlan_enabled(orig_dev))
1221 			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1222 							      orig_dev, vlan);
1223 		if (!err)
1224 			err = -EOPNOTSUPP;
1225 		return err;
1226 	}
1227 
1228 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1229 	if (WARN_ON(!bridge_port))
1230 		return -EINVAL;
1231 
1232 	if (!bridge_port->bridge_device->vlan_enabled)
1233 		return 0;
1234 
1235 	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1236 					     vlan->vid, flag_untagged,
1237 					     flag_pvid, extack);
1238 }
1239 
1240 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1241 {
1242 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1243 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1244 }
1245 
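/* Flush the FDB entries of a bridge port for a given FID, using a per-LAG or
 * per-port flush depending on whether the bridge port is a LAG.
 */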
1246 static int
1247 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1248 			       struct mlxsw_sp_bridge_port *bridge_port,
1249 			       u16 fid_index)
1250 {
1251 	bool lagged = bridge_port->lagged;
1252 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1253 	u16 system_port;
1254 
1255 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1256 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1257 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1258 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1259 
1260 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1261 }
1262 
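/* Entries programmed with a dynamic policy are aged out by the device, while
 * static entries persist until explicitly removed.
 */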
1263 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1264 {
1265 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1266 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1267 }
1268 
1269 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1270 {
1271 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1272 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1273 }
1274 
1275 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1276 					  const char *mac, u16 fid,
1277 					  enum mlxsw_sp_l3proto proto,
1278 					  const union mlxsw_sp_l3addr *addr,
1279 					  bool adding, bool dynamic)
1280 {
1281 	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
1282 	char *sfd_pl;
1283 	u8 num_rec;
1284 	u32 uip;
1285 	int err;
1286 
1287 	switch (proto) {
1288 	case MLXSW_SP_L3_PROTO_IPV4:
1289 		uip = be32_to_cpu(addr->addr4);
1290 		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
1291 		break;
1292 	case MLXSW_SP_L3_PROTO_IPV6:
1293 	default:
1294 		WARN_ON(1);
1295 		return -EOPNOTSUPP;
1296 	}
1297 
1298 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1299 	if (!sfd_pl)
1300 		return -ENOMEM;
1301 
1302 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1303 	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
1304 				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
1305 				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
1306 				     sfd_proto);
1307 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1308 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1309 	if (err)
1310 		goto out;
1311 
1312 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1313 		err = -EBUSY;
1314 
1315 out:
1316 	kfree(sfd_pl);
1317 	return err;
1318 }
1319 
1320 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1321 				     const char *mac, u16 fid, bool adding,
1322 				     enum mlxsw_reg_sfd_rec_action action,
1323 				     enum mlxsw_reg_sfd_rec_policy policy)
1324 {
1325 	char *sfd_pl;
1326 	u8 num_rec;
1327 	int err;
1328 
1329 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1330 	if (!sfd_pl)
1331 		return -ENOMEM;
1332 
1333 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1334 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1335 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1336 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1337 	if (err)
1338 		goto out;
1339 
1340 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1341 		err = -EBUSY;
1342 
1343 out:
1344 	kfree(sfd_pl);
1345 	return err;
1346 }
1347 
1348 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1349 				   const char *mac, u16 fid, bool adding,
1350 				   bool dynamic)
1351 {
1352 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1353 					 MLXSW_REG_SFD_REC_ACTION_NOP,
1354 					 mlxsw_sp_sfd_rec_policy(dynamic));
1355 }
1356 
1357 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1358 			bool adding)
1359 {
1360 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1361 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1362 					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1363 }
1364 
1365 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1366 				       const char *mac, u16 fid, u16 lag_vid,
1367 				       bool adding, bool dynamic)
1368 {
1369 	char *sfd_pl;
1370 	u8 num_rec;
1371 	int err;
1372 
1373 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1374 	if (!sfd_pl)
1375 		return -ENOMEM;
1376 
1377 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1378 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1379 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1380 				  lag_vid, lag_id);
1381 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1382 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1383 	if (err)
1384 		goto out;
1385 
1386 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1387 		err = -EBUSY;
1388 
1389 out:
1390 	kfree(sfd_pl);
1391 	return err;
1392 }
1393 
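/* Reflect an FDB entry notified by the bridge into the device, keyed by the
 * FID backing the {bridge, VID} pair, using a per-port or per-LAG record as
 * appropriate.
 */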
1394 static int
1395 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1396 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1397 {
1398 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1399 	struct net_device *orig_dev = fdb_info->info.dev;
1400 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1401 	struct mlxsw_sp_bridge_device *bridge_device;
1402 	struct mlxsw_sp_bridge_port *bridge_port;
1403 	u16 fid_index, vid;
1404 
1405 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1406 	if (!bridge_port)
1407 		return -EINVAL;
1408 
1409 	bridge_device = bridge_port->bridge_device;
1410 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1411 							       bridge_device,
1412 							       fdb_info->vid);
1413 	if (!mlxsw_sp_port_vlan)
1414 		return 0;
1415 
1416 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1417 	vid = mlxsw_sp_port_vlan->vid;
1418 
1419 	if (!bridge_port->lagged)
1420 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1421 					       bridge_port->system_port,
1422 					       fdb_info->addr, fid_index,
1423 					       adding, false);
1424 	else
1425 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1426 						   bridge_port->lag_id,
1427 						   fdb_info->addr, fid_index,
1428 						   vid, adding, false);
1429 }
1430 
1431 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1432 				u16 fid, u16 mid_idx, bool adding)
1433 {
1434 	char *sfd_pl;
1435 	u8 num_rec;
1436 	int err;
1437 
1438 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1439 	if (!sfd_pl)
1440 		return -ENOMEM;
1441 
1442 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1443 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1444 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1445 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1446 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1447 	if (err)
1448 		goto out;
1449 
1450 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1451 		err = -EBUSY;
1452 
1453 out:
1454 	kfree(sfd_pl);
1455 	return err;
1456 }
1457 
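/* Write a full SMID entry for the given MID index: ports set in
 * 'ports_bitmap' (and the router port, if requested) become egress members,
 * while all other existing ports are explicitly cleared.
 */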
1458 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1459 					 long *ports_bitmap,
1460 					 bool set_router_port)
1461 {
1462 	char *smid_pl;
1463 	int err, i;
1464 
1465 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1466 	if (!smid_pl)
1467 		return -ENOMEM;
1468 
1469 	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1470 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1471 		if (mlxsw_sp->ports[i])
1472 			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1473 	}
1474 
1475 	mlxsw_reg_smid_port_mask_set(smid_pl,
1476 				     mlxsw_sp_router_port(mlxsw_sp), 1);
1477 
1478 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1479 		mlxsw_reg_smid_port_set(smid_pl, i, 1);
1480 
1481 	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1482 				set_router_port);
1483 
1484 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1485 	kfree(smid_pl);
1486 	return err;
1487 }
1488 
1489 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1490 				  u16 mid_idx, bool add)
1491 {
1492 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1493 	char *smid_pl;
1494 	int err;
1495 
1496 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1497 	if (!smid_pl)
1498 		return -ENOMEM;
1499 
1500 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1501 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1502 	kfree(smid_pl);
1503 	return err;
1504 }
1505 
1506 static struct
1507 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1508 				const unsigned char *addr,
1509 				u16 fid)
1510 {
1511 	struct mlxsw_sp_mid *mid;
1512 
1513 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1514 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1515 			return mid;
1516 	}
1517 	return NULL;
1518 }
1519 
1520 static void
1521 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1522 				      struct mlxsw_sp_bridge_port *bridge_port,
1523 				      unsigned long *ports_bitmap)
1524 {
1525 	struct mlxsw_sp_port *mlxsw_sp_port;
1526 	u64 max_lag_members, i;
1527 	int lag_id;
1528 
1529 	if (!bridge_port->lagged) {
1530 		set_bit(bridge_port->system_port, ports_bitmap);
1531 	} else {
1532 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1533 						     MAX_LAG_MEMBERS);
1534 		lag_id = bridge_port->lag_id;
1535 		for (i = 0; i < max_lag_members; i++) {
1536 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1537 								 lag_id, i);
1538 			if (mlxsw_sp_port)
1539 				set_bit(mlxsw_sp_port->local_port,
1540 					ports_bitmap);
1541 		}
1542 	}
1543 }
1544 
1545 static void
1546 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1547 				struct mlxsw_sp_bridge_device *bridge_device,
1548 				struct mlxsw_sp *mlxsw_sp)
1549 {
1550 	struct mlxsw_sp_bridge_port *bridge_port;
1551 
1552 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1553 		if (bridge_port->mrouter) {
1554 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1555 							      bridge_port,
1556 							      flood_bitmap);
1557 		}
1558 	}
1559 }
1560 
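/* Allocate a free MID index and program the MDB entry in hardware. The flood
 * bitmap is the union of the ports already in the MID and the bridge's
 * mrouter ports. Returns false if no free MID exists or a register write
 * fails.
 */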
1561 static bool
1562 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1563 			    struct mlxsw_sp_mid *mid,
1564 			    struct mlxsw_sp_bridge_device *bridge_device)
1565 {
1566 	long *flood_bitmap;
1567 	int num_of_ports;
1568 	int alloc_size;
1569 	u16 mid_idx;
1570 	int err;
1571 
1572 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1573 				      MLXSW_SP_MID_MAX);
1574 	if (mid_idx == MLXSW_SP_MID_MAX)
1575 		return false;
1576 
1577 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1578 	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1579 	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1580 	if (!flood_bitmap)
1581 		return false;
1582 
1583 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1584 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1585 
1586 	mid->mid = mid_idx;
1587 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1588 					    bridge_device->mrouter);
1589 	kfree(flood_bitmap);
1590 	if (err)
1591 		return false;
1592 
1593 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1594 				   true);
1595 	if (err)
1596 		return false;
1597 
1598 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1599 	mid->in_hw = true;
1600 	return true;
1601 }
1602 
1603 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1604 					struct mlxsw_sp_mid *mid)
1605 {
1606 	if (!mid->in_hw)
1607 		return 0;
1608 
1609 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1610 	mid->in_hw = false;
1611 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1612 				    false);
1613 }
1614 
1615 static struct
1616 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1617 				  struct mlxsw_sp_bridge_device *bridge_device,
1618 				  const unsigned char *addr,
1619 				  u16 fid)
1620 {
1621 	struct mlxsw_sp_mid *mid;
1622 	size_t alloc_size;
1623 
1624 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1625 	if (!mid)
1626 		return NULL;
1627 
1628 	alloc_size = sizeof(unsigned long) *
1629 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1630 
1631 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1632 	if (!mid->ports_in_mid)
1633 		goto err_ports_in_mid_alloc;
1634 
1635 	ether_addr_copy(mid->addr, addr);
1636 	mid->fid = fid;
1637 	mid->in_hw = false;
1638 
1639 	if (!bridge_device->multicast_enabled)
1640 		goto out;
1641 
1642 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1643 		goto err_write_mdb_entry;
1644 
1645 out:
1646 	list_add_tail(&mid->list, &bridge_device->mids_list);
1647 	return mid;
1648 
1649 err_write_mdb_entry:
1650 	kfree(mid->ports_in_mid);
1651 err_ports_in_mid_alloc:
1652 	kfree(mid);
1653 	return NULL;
1654 }
1655 
1656 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1657 					 struct mlxsw_sp_mid *mid)
1658 {
1659 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1660 	int err = 0;
1661 
1662 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1663 	if (bitmap_empty(mid->ports_in_mid,
1664 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1665 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1666 		list_del(&mid->list);
1667 		kfree(mid->ports_in_mid);
1668 		kfree(mid);
1669 	}
1670 	return err;
1671 }
1672 
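/* Add a port to an MDB group, allocating the MID on first use. The port is
 * only programmed into the SMID when multicast snooping is enabled and the
 * port is not an mrouter port, as mrouter ports are already members of all
 * MIDs.
 */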
1673 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1674 				 const struct switchdev_obj_port_mdb *mdb)
1675 {
1676 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1677 	struct net_device *orig_dev = mdb->obj.orig_dev;
1678 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1679 	struct net_device *dev = mlxsw_sp_port->dev;
1680 	struct mlxsw_sp_bridge_device *bridge_device;
1681 	struct mlxsw_sp_bridge_port *bridge_port;
1682 	struct mlxsw_sp_mid *mid;
1683 	u16 fid_index;
1684 	int err = 0;
1685 
1686 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1687 	if (!bridge_port)
1688 		return 0;
1689 
1690 	bridge_device = bridge_port->bridge_device;
1691 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1692 							       bridge_device,
1693 							       mdb->vid);
1694 	if (!mlxsw_sp_port_vlan)
1695 		return 0;
1696 
1697 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1698 
1699 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1700 	if (!mid) {
1701 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1702 					  fid_index);
1703 		if (!mid) {
1704 			netdev_err(dev, "Unable to allocate MC group\n");
1705 			return -ENOMEM;
1706 		}
1707 	}
1708 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1709 
1710 	if (!bridge_device->multicast_enabled)
1711 		return 0;
1712 
1713 	if (bridge_port->mrouter)
1714 		return 0;
1715 
1716 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1717 	if (err) {
1718 		netdev_err(dev, "Unable to set SMID\n");
1719 		goto err_out;
1720 	}
1721 
1722 	return 0;
1723 
1724 err_out:
1725 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1726 	return err;
1727 }
1728 
1729 static void
1730 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1731 				   struct mlxsw_sp_bridge_device
1732 				   *bridge_device)
1733 {
1734 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1735 	struct mlxsw_sp_mid *mid;
1736 	bool mc_enabled;
1737 
1738 	mc_enabled = bridge_device->multicast_enabled;
1739 
1740 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1741 		if (mc_enabled)
1742 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1743 						    bridge_device);
1744 		else
1745 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1746 	}
1747 }
1748 
1749 static void
1750 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1751 				 struct mlxsw_sp_bridge_port *bridge_port,
1752 				 bool add)
1753 {
1754 	struct mlxsw_sp_bridge_device *bridge_device;
1755 	struct mlxsw_sp_mid *mid;
1756 
1757 	bridge_device = bridge_port->bridge_device;
1758 
1759 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1760 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1761 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1762 	}
1763 }
1764 
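/* switchdev object add handler: offloads VLANs and MDB entries configured on
 * bridge ports.
 */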
1765 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1766 				 const struct switchdev_obj *obj,
1767 				 struct netlink_ext_ack *extack)
1768 {
1769 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1770 	const struct switchdev_obj_port_vlan *vlan;
1771 	int err = 0;
1772 
1773 	switch (obj->id) {
1774 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1775 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1776 
1777 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
1778 
1779 		/* The event is emitted before the changes are actually
1780 		 * applied to the bridge. Therefore schedule the respin
1781 		 * call for later, so that the respin logic sees the
1782 		 * updated bridge state.
1783 		 */
1784 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1785 		break;
1786 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1787 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1788 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1789 		break;
1790 	default:
1791 		err = -EOPNOTSUPP;
1792 		break;
1793 	}
1794 
1795 	return err;
1796 }
1797 
1798 static void
1799 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1800 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1801 {
1802 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1803 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1804 	u16 proto;
1805 
1806 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1807 	if (WARN_ON(!mlxsw_sp_port_vlan))
1808 		return;
1809 
1810 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1811 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1812 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1813 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1814 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1815 }
1816 
1817 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1818 				   const struct switchdev_obj_port_vlan *vlan)
1819 {
1820 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1821 	struct net_device *orig_dev = vlan->obj.orig_dev;
1822 	struct mlxsw_sp_bridge_port *bridge_port;
1823 
1824 	if (netif_is_bridge_master(orig_dev))
1825 		return -EOPNOTSUPP;
1826 
1827 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1828 	if (WARN_ON(!bridge_port))
1829 		return -EINVAL;
1830 
1831 	if (!bridge_port->bridge_device->vlan_enabled)
1832 		return 0;
1833 
1834 	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
1835 
1836 	return 0;
1837 }
1838 
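/* Undo the effect of mlxsw_sp_port_mdb_add() for a single MID: remove
 * the port from the SMID if it was programmed there (multicast enabled
 * and the port is not an mrouter) and release the port's membership in
 * the MID.
 */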
1839 static int
1840 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1841 			struct mlxsw_sp_bridge_port *bridge_port,
1842 			struct mlxsw_sp_mid *mid)
1843 {
1844 	struct net_device *dev = mlxsw_sp_port->dev;
1845 	int err;
1846 
1847 	if (bridge_port->bridge_device->multicast_enabled &&
1848 	    !bridge_port->mrouter) {
1849 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1850 		if (err)
1851 			netdev_err(dev, "Unable to remove port from SMID\n");
1852 	}
1853 
1854 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1855 	if (err)
1856 		netdev_err(dev, "Unable to remove MC SFD\n");
1857 
1858 	return err;
1859 }
1860 
1861 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1862 				 const struct switchdev_obj_port_mdb *mdb)
1863 {
1864 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1865 	struct net_device *orig_dev = mdb->obj.orig_dev;
1866 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1867 	struct mlxsw_sp_bridge_device *bridge_device;
1868 	struct net_device *dev = mlxsw_sp_port->dev;
1869 	struct mlxsw_sp_bridge_port *bridge_port;
1870 	struct mlxsw_sp_mid *mid;
1871 	u16 fid_index;
1872 
1873 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1874 	if (!bridge_port)
1875 		return 0;
1876 
1877 	bridge_device = bridge_port->bridge_device;
1878 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1879 							       bridge_device,
1880 							       mdb->vid);
1881 	if (!mlxsw_sp_port_vlan)
1882 		return 0;
1883 
1884 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1885 
1886 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1887 	if (!mid) {
1888 		netdev_err(dev, "Unable to remove port from MC DB\n");
1889 		return -EINVAL;
1890 	}
1891 
1892 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1893 }
1894 
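/* Flush the port's MDB state: remove the port from every MID it is a
 * member of and, if the port is an mrouter, stop replicating the
 * remaining MIDs to it.
 */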
1895 static void
1896 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1897 			       struct mlxsw_sp_bridge_port *bridge_port)
1898 {
1899 	struct mlxsw_sp_bridge_device *bridge_device;
1900 	struct mlxsw_sp_mid *mid, *tmp;
1901 
1902 	bridge_device = bridge_port->bridge_device;
1903 
1904 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1905 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1906 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1907 						mid);
1908 		} else if (bridge_device->multicast_enabled &&
1909 			   bridge_port->mrouter) {
1910 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1911 		}
1912 	}
1913 }
1914 
1915 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1916 				 const struct switchdev_obj *obj)
1917 {
1918 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1919 	int err = 0;
1920 
1921 	switch (obj->id) {
1922 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1923 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1924 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1925 		break;
1926 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1927 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1928 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1929 		break;
1930 	default:
1931 		err = -EOPNOTSUPP;
1932 		break;
1933 	}
1934 
1935 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1936 
1937 	return err;
1938 }
1939 
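/* Return an arbitrary member port of the LAG to act as its
 * representative. All members share the same bridge configuration, so
 * any of them can be used.
 */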
1940 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1941 						   u16 lag_id)
1942 {
1943 	struct mlxsw_sp_port *mlxsw_sp_port;
1944 	u64 max_lag_members;
1945 	int i;
1946 
1947 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1948 					     MAX_LAG_MEMBERS);
1949 	for (i = 0; i < max_lag_members; i++) {
1950 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1951 		if (mlxsw_sp_port)
1952 			return mlxsw_sp_port;
1953 	}
1954 	return NULL;
1955 }
1956 
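/* Common checks for joining a VLAN-aware (802.1Q or 802.1ad) bridge:
 * VLAN uppers cannot be enslaved to such a bridge and the port's
 * default VLAN can no longer be used as a router interface.
 */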
1957 static int
1958 mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
1959 				     struct mlxsw_sp_port *mlxsw_sp_port,
1960 				     struct netlink_ext_ack *extack)
1961 {
1962 	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enslave a VLAN device to a VLAN-aware bridge");
1964 		return -EINVAL;
1965 	}
1966 
1967 	/* Port is no longer usable as a router interface */
1968 	if (mlxsw_sp_port->default_vlan->fid)
1969 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
1970 
1971 	return 0;
1972 }
1973 
1974 static int
1975 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1976 				struct mlxsw_sp_bridge_port *bridge_port,
1977 				struct mlxsw_sp_port *mlxsw_sp_port,
1978 				struct netlink_ext_ack *extack)
1979 {
1980 	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
1981 						    extack);
1982 }
1983 
1984 static void
1985 mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
1986 {
1987 	/* Make sure untagged frames are allowed to ingress */
1988 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1989 			       ETH_P_8021Q);
1990 }
1991 
1992 static void
1993 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1994 				 struct mlxsw_sp_bridge_port *bridge_port,
1995 				 struct mlxsw_sp_port *mlxsw_sp_port)
1996 {
1997 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
1998 }
1999 
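/* Map the VNI of the VxLAN device to the 802.1Q FID corresponding to
 * the VLAN that is PVID and egress untagged on the VxLAN bridge port,
 * and enable NVE on that FID.
 */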
2000 static int
2001 mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2002 				      const struct net_device *vxlan_dev,
2003 				      u16 vid, u16 ethertype,
2004 				      struct netlink_ext_ack *extack)
2005 {
2006 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2007 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2008 	struct mlxsw_sp_nve_params params = {
2009 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2010 		.vni = vxlan->cfg.vni,
2011 		.dev = vxlan_dev,
2012 		.ethertype = ethertype,
2013 	};
2014 	struct mlxsw_sp_fid *fid;
2015 	int err;
2016 
2017 	/* If the VLAN is 0, we need to find the VLAN that is configured as
2018 	 * PVID and egress untagged on the bridge port of the VxLAN device.
2019 	 * It is possible no such VLAN exists
2020 	 */
2021 	if (!vid) {
2022 		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
2023 		if (err || !vid)
2024 			return err;
2025 	}
2026 
2027 	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2028 	if (IS_ERR(fid)) {
2029 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
2030 		return PTR_ERR(fid);
2031 	}
2032 
2033 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2034 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2035 		err = -EINVAL;
2036 		goto err_vni_exists;
2037 	}
2038 
2039 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2040 	if (err)
2041 		goto err_nve_fid_enable;
2042 
2043 	return 0;
2044 
2045 err_nve_fid_enable:
2046 err_vni_exists:
2047 	mlxsw_sp_fid_put(fid);
2048 	return err;
2049 }
2050 
2051 static int
2052 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2053 				 const struct net_device *vxlan_dev, u16 vid,
2054 				 struct netlink_ext_ack *extack)
2055 {
2056 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2057 						     vid, ETH_P_8021Q, extack);
2058 }
2059 
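/* Find the VxLAN device (if any) whose bridge port has 'vid' configured
 * as PVID and egress untagged, i.e. the device this VLAN is currently
 * mapped to.
 */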
2060 static struct net_device *
2061 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2062 {
2063 	struct net_device *dev;
2064 	struct list_head *iter;
2065 
2066 	netdev_for_each_lower_dev(br_dev, dev, iter) {
2067 		u16 pvid;
2068 		int err;
2069 
2070 		if (!netif_is_vxlan(dev))
2071 			continue;
2072 
2073 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2074 		if (err || pvid != vid)
2075 			continue;
2076 
2077 		return dev;
2078 	}
2079 
2080 	return NULL;
2081 }
2082 
2083 static struct mlxsw_sp_fid *
2084 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2085 			      u16 vid, struct netlink_ext_ack *extack)
2086 {
2087 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2088 
2089 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2090 }
2091 
2092 static struct mlxsw_sp_fid *
2093 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2094 				 u16 vid)
2095 {
2096 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2097 
2098 	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2099 }
2100 
2101 static u16
2102 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2103 			      const struct mlxsw_sp_fid *fid)
2104 {
2105 	return mlxsw_sp_fid_8021q_vid(fid);
2106 }
2107 
2108 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2109 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
2110 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
2111 	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
2112 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2113 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2114 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2115 };
2116 
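/* Check whether the port already has a {Port, VID} enslaved to the
 * given bridge. Used to reject bridging multiple VLAN uppers of the
 * same port.
 */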
2117 static bool
2118 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2119 			   const struct net_device *br_dev)
2120 {
2121 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2122 
2123 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2124 			    list) {
2125 		if (mlxsw_sp_port_vlan->bridge_port &&
2126 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2127 		    br_dev)
2128 			return true;
2129 	}
2130 
2131 	return false;
2132 }
2133 
2134 static int
2135 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2136 				struct mlxsw_sp_bridge_port *bridge_port,
2137 				struct mlxsw_sp_port *mlxsw_sp_port,
2138 				struct netlink_ext_ack *extack)
2139 {
2140 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2141 	struct net_device *dev = bridge_port->dev;
2142 	u16 vid;
2143 
2144 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2145 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2146 	if (WARN_ON(!mlxsw_sp_port_vlan))
2147 		return -EINVAL;
2148 
2149 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot bridge VLAN uppers of the same port");
2151 		return -EINVAL;
2152 	}
2153 
2154 	/* Port is no longer usable as a router interface */
2155 	if (mlxsw_sp_port_vlan->fid)
2156 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2157 
2158 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
2159 					      extack);
2160 }
2161 
2162 static void
2163 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2164 				 struct mlxsw_sp_bridge_port *bridge_port,
2165 				 struct mlxsw_sp_port *mlxsw_sp_port)
2166 {
2167 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2168 	struct net_device *dev = bridge_port->dev;
2169 	u16 vid;
2170 
2171 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2172 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2173 	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2174 		return;
2175 
2176 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2177 }
2178 
2179 static int
2180 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2181 				 const struct net_device *vxlan_dev, u16 vid,
2182 				 struct netlink_ext_ack *extack)
2183 {
2184 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2185 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2186 	struct mlxsw_sp_nve_params params = {
2187 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2188 		.vni = vxlan->cfg.vni,
2189 		.dev = vxlan_dev,
2190 		.ethertype = ETH_P_8021Q,
2191 	};
2192 	struct mlxsw_sp_fid *fid;
2193 	int err;
2194 
2195 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2196 	if (IS_ERR(fid)) {
2197 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
		return PTR_ERR(fid);
2199 	}
2200 
2201 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2202 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2203 		err = -EINVAL;
2204 		goto err_vni_exists;
2205 	}
2206 
2207 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2208 	if (err)
2209 		goto err_nve_fid_enable;
2210 
2211 	return 0;
2212 
2213 err_nve_fid_enable:
2214 err_vni_exists:
2215 	mlxsw_sp_fid_put(fid);
2216 	return err;
2217 }
2218 
2219 static struct mlxsw_sp_fid *
2220 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2221 			      u16 vid, struct netlink_ext_ack *extack)
2222 {
2223 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2224 
2225 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2226 }
2227 
2228 static struct mlxsw_sp_fid *
2229 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2230 				 u16 vid)
2231 {
2232 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2233 
2234 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2235 	if (vid)
2236 		return NULL;
2237 
2238 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2239 }
2240 
2241 static u16
2242 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2243 			      const struct mlxsw_sp_fid *fid)
2244 {
2245 	return 0;
2246 }
2247 
2248 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2249 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2250 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2251 	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
2252 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2253 	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
2254 	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
2255 };
2256 
2257 static int
2258 mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2259 				 struct mlxsw_sp_bridge_port *bridge_port,
2260 				 struct mlxsw_sp_port *mlxsw_sp_port,
2261 				 struct netlink_ext_ack *extack)
2262 {
2263 	int err;
2264 
2265 	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
2266 	if (err)
2267 		return err;
2268 
2269 	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2270 						   extack);
2271 	if (err)
2272 		goto err_bridge_vlan_aware_port_join;
2273 
2274 	return 0;
2275 
2276 err_bridge_vlan_aware_port_join:
2277 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2278 	return err;
2279 }
2280 
2281 static void
2282 mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2283 				  struct mlxsw_sp_bridge_port *bridge_port,
2284 				  struct mlxsw_sp_port *mlxsw_sp_port)
2285 {
2286 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
2287 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2288 }
2289 
2290 static int
2291 mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2292 				  const struct net_device *vxlan_dev, u16 vid,
2293 				  struct netlink_ext_ack *extack)
2294 {
2295 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2296 						     vid, ETH_P_8021AD, extack);
2297 }
2298 
2299 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
2300 	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
2301 	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
2302 	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
2303 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2304 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2305 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2306 };
2307 
2308 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2309 			      struct net_device *brport_dev,
2310 			      struct net_device *br_dev,
2311 			      struct netlink_ext_ack *extack)
2312 {
2313 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2314 	struct mlxsw_sp_bridge_device *bridge_device;
2315 	struct mlxsw_sp_bridge_port *bridge_port;
2316 	int err;
2317 
2318 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2319 					       extack);
2320 	if (IS_ERR(bridge_port))
2321 		return PTR_ERR(bridge_port);
2322 	bridge_device = bridge_port->bridge_device;
2323 
2324 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2325 					    mlxsw_sp_port, extack);
2326 	if (err)
2327 		goto err_port_join;
2328 
2329 	return 0;
2330 
2331 err_port_join:
2332 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2333 	return err;
2334 }
2335 
2336 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2337 				struct net_device *brport_dev,
2338 				struct net_device *br_dev)
2339 {
2340 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2341 	struct mlxsw_sp_bridge_device *bridge_device;
2342 	struct mlxsw_sp_bridge_port *bridge_port;
2343 
2344 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2345 	if (!bridge_device)
2346 		return;
2347 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2348 	if (!bridge_port)
2349 		return;
2350 
2351 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2352 				       mlxsw_sp_port);
2353 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2354 }
2355 
2356 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2357 			       const struct net_device *br_dev,
2358 			       const struct net_device *vxlan_dev, u16 vid,
2359 			       struct netlink_ext_ack *extack)
2360 {
2361 	struct mlxsw_sp_bridge_device *bridge_device;
2362 
2363 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2364 	if (WARN_ON(!bridge_device))
2365 		return -EINVAL;
2366 
2367 	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
2368 					      extack);
2369 }
2370 
2371 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2372 				 const struct net_device *vxlan_dev)
2373 {
2374 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2375 	struct mlxsw_sp_fid *fid;
2376 
2377 	/* If the VxLAN device is down, then the FID does not have a VNI */
2378 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2379 	if (!fid)
2380 		return;
2381 
2382 	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2383 	/* Drop both the reference we just took during lookup and the reference
2384 	 * the VXLAN device took.
2385 	 */
2386 	mlxsw_sp_fid_put(fid);
2387 	mlxsw_sp_fid_put(fid);
2388 }
2389 
2390 static void
2391 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2392 				      enum mlxsw_sp_l3proto *proto,
2393 				      union mlxsw_sp_l3addr *addr)
2394 {
2395 	if (vxlan_addr->sa.sa_family == AF_INET) {
2396 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2397 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2398 	} else {
2399 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2400 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2401 	}
2402 }
2403 
2404 static void
2405 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2406 				      const union mlxsw_sp_l3addr *addr,
2407 				      union vxlan_addr *vxlan_addr)
2408 {
2409 	switch (proto) {
2410 	case MLXSW_SP_L3_PROTO_IPV4:
2411 		vxlan_addr->sa.sa_family = AF_INET;
2412 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2413 		break;
2414 	case MLXSW_SP_L3_PROTO_IPV6:
2415 		vxlan_addr->sa.sa_family = AF_INET6;
2416 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2417 		break;
2418 	}
2419 }
2420 
2421 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2422 					      const char *mac,
2423 					      enum mlxsw_sp_l3proto proto,
2424 					      union mlxsw_sp_l3addr *addr,
2425 					      __be32 vni, bool adding)
2426 {
2427 	struct switchdev_notifier_vxlan_fdb_info info;
2428 	struct vxlan_dev *vxlan = netdev_priv(dev);
2429 	enum switchdev_notifier_type type;
2430 
2431 	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2432 			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2433 	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2434 	info.remote_port = vxlan->cfg.dst_port;
2435 	info.remote_vni = vni;
2436 	info.remote_ifindex = 0;
2437 	ether_addr_copy(info.eth_addr, mac);
2438 	info.vni = vni;
2439 	info.offloaded = adding;
2440 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2441 }
2442 
2443 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2444 					    const char *mac,
2445 					    enum mlxsw_sp_l3proto proto,
2446 					    union mlxsw_sp_l3addr *addr,
2447 					    __be32 vni,
2448 					    bool adding)
2449 {
2450 	if (netif_is_vxlan(dev))
2451 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2452 						  adding);
2453 }
2454 
2455 static void
2456 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2457 			    const char *mac, u16 vid,
2458 			    struct net_device *dev, bool offloaded)
2459 {
2460 	struct switchdev_notifier_fdb_info info;
2461 
2462 	info.addr = mac;
2463 	info.vid = vid;
2464 	info.offloaded = offloaded;
2465 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2466 }
2467 
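/* Process a single learned / aged-out MAC record from the SFN register.
 * The entry is added to (or removed from) the device's FDB and the
 * bridge driver is notified, unless the record cannot be resolved to a
 * known {Port, VID}, in which case the entry is only removed.
 */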
2468 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2469 					    char *sfn_pl, int rec_index,
2470 					    bool adding)
2471 {
2472 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2473 	struct mlxsw_sp_bridge_device *bridge_device;
2474 	struct mlxsw_sp_bridge_port *bridge_port;
2475 	struct mlxsw_sp_port *mlxsw_sp_port;
2476 	enum switchdev_notifier_type type;
2477 	char mac[ETH_ALEN];
2478 	u8 local_port;
2479 	u16 vid, fid;
2480 	bool do_notification = true;
2481 	int err;
2482 
2483 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2484 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2485 	if (!mlxsw_sp_port) {
2486 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2487 		goto just_remove;
2488 	}
2489 
2490 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2491 		goto just_remove;
2492 
2493 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2494 	if (!mlxsw_sp_port_vlan) {
2495 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2496 		goto just_remove;
2497 	}
2498 
2499 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2500 	if (!bridge_port) {
2501 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2502 		goto just_remove;
2503 	}
2504 
2505 	bridge_device = bridge_port->bridge_device;
2506 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2507 
2508 do_fdb_op:
2509 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2510 				      adding, true);
2511 	if (err) {
2512 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2513 		return;
2514 	}
2515 
2516 	if (!do_notification)
2517 		return;
2518 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2519 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2520 
2521 	return;
2522 
2523 just_remove:
2524 	adding = false;
2525 	do_notification = false;
2526 	goto do_fdb_op;
2527 }
2528 
2529 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2530 						char *sfn_pl, int rec_index,
2531 						bool adding)
2532 {
2533 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2534 	struct mlxsw_sp_bridge_device *bridge_device;
2535 	struct mlxsw_sp_bridge_port *bridge_port;
2536 	struct mlxsw_sp_port *mlxsw_sp_port;
2537 	enum switchdev_notifier_type type;
2538 	char mac[ETH_ALEN];
2539 	u16 lag_vid = 0;
2540 	u16 lag_id;
2541 	u16 vid, fid;
2542 	bool do_notification = true;
2543 	int err;
2544 
2545 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2546 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2547 	if (!mlxsw_sp_port) {
2548 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2549 		goto just_remove;
2550 	}
2551 
2552 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2553 		goto just_remove;
2554 
2555 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2556 	if (!mlxsw_sp_port_vlan) {
2557 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2558 		goto just_remove;
2559 	}
2560 
2561 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2562 	if (!bridge_port) {
2563 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2564 		goto just_remove;
2565 	}
2566 
2567 	bridge_device = bridge_port->bridge_device;
2568 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2569 	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2570 		  mlxsw_sp_port_vlan->vid : 0;
2571 
2572 do_fdb_op:
2573 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2574 					  adding, true);
2575 	if (err) {
2576 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2577 		return;
2578 	}
2579 
2580 	if (!do_notification)
2581 		return;
2582 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2583 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2584 
2585 	return;
2586 
2587 just_remove:
2588 	adding = false;
2589 	do_notification = false;
2590 	goto do_fdb_op;
2591 }
2592 
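/* Resolve the NVE device, VLAN and VNI of a unicast tunnel FDB record
 * and validate that learning is allowed on the corresponding bridge
 * port and VxLAN device.
 */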
2593 static int
2594 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2595 					    const struct mlxsw_sp_fid *fid,
2596 					    bool adding,
2597 					    struct net_device **nve_dev,
2598 					    u16 *p_vid, __be32 *p_vni)
2599 {
2600 	struct mlxsw_sp_bridge_device *bridge_device;
2601 	struct net_device *br_dev, *dev;
2602 	int nve_ifindex;
2603 	int err;
2604 
2605 	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2606 	if (err)
2607 		return err;
2608 
2609 	err = mlxsw_sp_fid_vni(fid, p_vni);
2610 	if (err)
2611 		return err;
2612 
2613 	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
2614 	if (!dev)
2615 		return -EINVAL;
2616 	*nve_dev = dev;
2617 
2618 	if (!netif_running(dev))
2619 		return -EINVAL;
2620 
2621 	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2622 		return -EINVAL;
2623 
2624 	if (adding && netif_is_vxlan(dev)) {
2625 		struct vxlan_dev *vxlan = netdev_priv(dev);
2626 
2627 		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2628 			return -EINVAL;
2629 	}
2630 
2631 	br_dev = netdev_master_upper_dev_get(dev);
2632 	if (!br_dev)
2633 		return -EINVAL;
2634 
2635 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2636 	if (!bridge_device)
2637 		return -EINVAL;
2638 
2639 	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
2640 
2641 	return 0;
2642 }
2643 
2644 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2645 						      char *sfn_pl,
2646 						      int rec_index,
2647 						      bool adding)
2648 {
2649 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2650 	enum switchdev_notifier_type type;
2651 	struct net_device *nve_dev;
2652 	union mlxsw_sp_l3addr addr;
2653 	struct mlxsw_sp_fid *fid;
2654 	char mac[ETH_ALEN];
2655 	u16 fid_index, vid;
2656 	__be32 vni;
2657 	u32 uip;
2658 	int err;
2659 
2660 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2661 				       &uip, &sfn_proto);
2662 
2663 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2664 	if (!fid)
2665 		goto err_fid_lookup;
2666 
2667 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2668 					      (enum mlxsw_sp_l3proto) sfn_proto,
2669 					      &addr);
2670 	if (err)
2671 		goto err_ip_resolve;
2672 
2673 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2674 							  &nve_dev, &vid, &vni);
2675 	if (err)
2676 		goto err_fdb_process;
2677 
2678 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2679 					     (enum mlxsw_sp_l3proto) sfn_proto,
2680 					     &addr, adding, true);
2681 	if (err)
2682 		goto err_fdb_op;
2683 
2684 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2685 					(enum mlxsw_sp_l3proto) sfn_proto,
2686 					&addr, vni, adding);
2687 
2688 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2689 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2690 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2691 
2692 	mlxsw_sp_fid_put(fid);
2693 
2694 	return;
2695 
2696 err_fdb_op:
2697 err_fdb_process:
2698 err_ip_resolve:
2699 	mlxsw_sp_fid_put(fid);
2700 err_fid_lookup:
2701 	/* Remove an FDB entry in case we cannot process it. Otherwise the
2702 	 * device will keep sending the same notification over and over again.
2703 	 */
2704 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2705 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2706 				       false, true);
2707 }
2708 
2709 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2710 					    char *sfn_pl, int rec_index)
2711 {
2712 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2713 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2714 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2715 						rec_index, true);
2716 		break;
2717 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2718 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2719 						rec_index, false);
2720 		break;
2721 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2722 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2723 						    rec_index, true);
2724 		break;
2725 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2726 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2727 						    rec_index, false);
2728 		break;
2729 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2730 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2731 							  rec_index, true);
2732 		break;
2733 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2734 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2735 							  rec_index, false);
2736 		break;
2737 	}
2738 }
2739 
2740 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
2741 					      bool no_delay)
2742 {
2743 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2744 	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
2745 
2746 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2747 			       msecs_to_jiffies(interval));
2748 }
2749 
2750 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
2751 
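/* Poll the device for FDB notifications. Up to
 * MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION queries are issued per work
 * invocation. If every query returned a full page of records, more are
 * likely pending and the work is rescheduled without delay in order to
 * keep draining the queue.
 */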
2752 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2753 {
2754 	struct mlxsw_sp_bridge *bridge;
2755 	struct mlxsw_sp *mlxsw_sp;
2756 	char *sfn_pl;
2757 	int queries;
2758 	u8 num_rec;
2759 	int i;
2760 	int err;
2761 
2762 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2763 	if (!sfn_pl)
2764 		return;
2765 
2766 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2767 	mlxsw_sp = bridge->mlxsw_sp;
2768 
2769 	rtnl_lock();
2770 	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
2771 	while (queries > 0) {
2772 		mlxsw_reg_sfn_pack(sfn_pl);
2773 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2774 		if (err) {
2775 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2776 			goto out;
2777 		}
2778 		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2779 		for (i = 0; i < num_rec; i++)
2780 			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2781 		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
2782 			goto out;
2783 		queries--;
2784 	}
2785 
2786 out:
2787 	rtnl_unlock();
2788 	kfree(sfn_pl);
2789 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
2790 }
2791 
2792 struct mlxsw_sp_switchdev_event_work {
2793 	struct work_struct work;
2794 	union {
2795 		struct switchdev_notifier_fdb_info fdb_info;
2796 		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2797 	};
2798 	struct net_device *dev;
2799 	unsigned long event;
2800 };
2801 
2802 static void
2803 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2804 					  struct mlxsw_sp_switchdev_event_work *
2805 					  switchdev_work,
2806 					  struct mlxsw_sp_fid *fid, __be32 vni)
2807 {
2808 	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2809 	struct switchdev_notifier_fdb_info *fdb_info;
2810 	struct net_device *dev = switchdev_work->dev;
2811 	enum mlxsw_sp_l3proto proto;
2812 	union mlxsw_sp_l3addr addr;
2813 	int err;
2814 
2815 	fdb_info = &switchdev_work->fdb_info;
2816 	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2817 	if (err)
2818 		return;
2819 
2820 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2821 					      &proto, &addr);
2822 
2823 	switch (switchdev_work->event) {
2824 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2825 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2826 						     vxlan_fdb_info.eth_addr,
2827 						     mlxsw_sp_fid_index(fid),
2828 						     proto, &addr, true, false);
2829 		if (err)
2830 			return;
2831 		vxlan_fdb_info.offloaded = true;
2832 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2833 					 &vxlan_fdb_info.info, NULL);
2834 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2835 					    vxlan_fdb_info.eth_addr,
2836 					    fdb_info->vid, dev, true);
2837 		break;
2838 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2839 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2840 						     vxlan_fdb_info.eth_addr,
2841 						     mlxsw_sp_fid_index(fid),
2842 						     proto, &addr, false,
2843 						     false);
2844 		vxlan_fdb_info.offloaded = false;
2845 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2846 					 &vxlan_fdb_info.info, NULL);
2847 		break;
2848 	}
2849 }
2850 
2851 static void
2852 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2853 					switchdev_work)
2854 {
2855 	struct mlxsw_sp_bridge_device *bridge_device;
2856 	struct net_device *dev = switchdev_work->dev;
2857 	struct net_device *br_dev;
2858 	struct mlxsw_sp *mlxsw_sp;
2859 	struct mlxsw_sp_fid *fid;
2860 	__be32 vni;
2861 	int err;
2862 
2863 	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2864 	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
2865 		return;
2866 
2867 	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2868 	    !switchdev_work->fdb_info.added_by_user)
2869 		return;
2870 
2871 	if (!netif_running(dev))
2872 		return;
2873 	br_dev = netdev_master_upper_dev_get(dev);
2874 	if (!br_dev)
2875 		return;
2876 	if (!netif_is_bridge_master(br_dev))
2877 		return;
2878 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2879 	if (!mlxsw_sp)
2880 		return;
2881 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2882 	if (!bridge_device)
2883 		return;
2884 
2885 	fid = bridge_device->ops->fid_lookup(bridge_device,
2886 					     switchdev_work->fdb_info.vid);
2887 	if (!fid)
2888 		return;
2889 
2890 	err = mlxsw_sp_fid_vni(fid, &vni);
2891 	if (err)
2892 		goto out;
2893 
2894 	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2895 						  vni);
2896 
2897 out:
2898 	mlxsw_sp_fid_put(fid);
2899 }
2900 
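/* Deferred handler for bridge FDB events. The FDB entry is programmed
 * to (or removed from) the device and, on successful addition, marked
 * as offloaded towards the bridge. Events on VxLAN devices are
 * dispatched to the NVE handler instead.
 */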
2901 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2902 {
2903 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2904 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2905 	struct net_device *dev = switchdev_work->dev;
2906 	struct switchdev_notifier_fdb_info *fdb_info;
2907 	struct mlxsw_sp_port *mlxsw_sp_port;
2908 	int err;
2909 
2910 	rtnl_lock();
2911 	if (netif_is_vxlan(dev)) {
2912 		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2913 		goto out;
2914 	}
2915 
2916 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2917 	if (!mlxsw_sp_port)
2918 		goto out;
2919 
2920 	switch (switchdev_work->event) {
2921 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2922 		fdb_info = &switchdev_work->fdb_info;
2923 		if (!fdb_info->added_by_user)
2924 			break;
2925 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2926 		if (err)
2927 			break;
2928 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2929 					    fdb_info->addr,
2930 					    fdb_info->vid, dev, true);
2931 		break;
2932 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2933 		fdb_info = &switchdev_work->fdb_info;
2934 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2935 		break;
2936 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
2937 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2938 		/* These events are only used to potentially update an existing
2939 		 * SPAN mirror.
2940 		 */
2941 		break;
2942 	}
2943 
2944 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2945 
2946 out:
2947 	rtnl_unlock();
2948 	kfree(switchdev_work->fdb_info.addr);
2949 	kfree(switchdev_work);
2950 	dev_put(dev);
2951 }
2952 
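/* Offload an FDB entry added to the VxLAN device. The all-zeros MAC
 * represents the default remote and is programmed as a flood
 * destination; other entries are only programmed if the bridge FDB also
 * points the MAC at the VxLAN device.
 */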
2953 static void
2954 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2955 				 struct mlxsw_sp_switchdev_event_work *
2956 				 switchdev_work)
2957 {
2958 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2959 	struct mlxsw_sp_bridge_device *bridge_device;
2960 	struct net_device *dev = switchdev_work->dev;
2961 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
2962 	enum mlxsw_sp_l3proto proto;
2963 	union mlxsw_sp_l3addr addr;
2964 	struct net_device *br_dev;
2965 	struct mlxsw_sp_fid *fid;
2966 	u16 vid;
2967 	int err;
2968 
2969 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2970 	br_dev = netdev_master_upper_dev_get(dev);
2971 
2972 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2973 	if (!bridge_device)
2974 		return;
2975 
2976 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2977 	if (!fid)
2978 		return;
2979 
2980 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
2981 					      &proto, &addr);
2982 
2983 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2984 		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2985 		if (err) {
2986 			mlxsw_sp_fid_put(fid);
2987 			return;
2988 		}
2989 		vxlan_fdb_info->offloaded = true;
2990 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2991 					 &vxlan_fdb_info->info, NULL);
2992 		mlxsw_sp_fid_put(fid);
2993 		return;
2994 	}
2995 
2996 	/* The device has a single FDB table, whereas Linux has two - one
2997 	 * in the bridge driver and another in the VxLAN driver. We only
2998 	 * program an entry to the device if the MAC points to the VxLAN
2999 	 * device in the bridge's FDB table
3000 	 */
3001 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3002 	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
3003 		goto err_br_fdb_find;
3004 
3005 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3006 					     mlxsw_sp_fid_index(fid), proto,
3007 					     &addr, true, false);
3008 	if (err)
3009 		goto err_fdb_tunnel_uc_op;
3010 	vxlan_fdb_info->offloaded = true;
3011 	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3012 				 &vxlan_fdb_info->info, NULL);
3013 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3014 				    vxlan_fdb_info->eth_addr, vid, dev, true);
3015 
3016 	mlxsw_sp_fid_put(fid);
3017 
3018 	return;
3019 
3020 err_fdb_tunnel_uc_op:
3021 err_br_fdb_find:
3022 	mlxsw_sp_fid_put(fid);
3023 }
3024 
3025 static void
3026 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3027 				 struct mlxsw_sp_switchdev_event_work *
3028 				 switchdev_work)
3029 {
3030 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3031 	struct mlxsw_sp_bridge_device *bridge_device;
3032 	struct net_device *dev = switchdev_work->dev;
3033 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3034 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3035 	enum mlxsw_sp_l3proto proto;
3036 	union mlxsw_sp_l3addr addr;
3037 	struct mlxsw_sp_fid *fid;
3038 	u16 vid;
3039 
3040 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3041 
3042 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3043 	if (!bridge_device)
3044 		return;
3045 
3046 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3047 	if (!fid)
3048 		return;
3049 
3050 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3051 					      &proto, &addr);
3052 
3053 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3054 		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3055 		mlxsw_sp_fid_put(fid);
3056 		return;
3057 	}
3058 
3059 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3060 				       mlxsw_sp_fid_index(fid), proto, &addr,
3061 				       false, false);
3062 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3063 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3064 				    vxlan_fdb_info->eth_addr, vid, dev, false);
3065 
3066 	mlxsw_sp_fid_put(fid);
3067 }
3068 
3069 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3070 {
3071 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3072 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3073 	struct net_device *dev = switchdev_work->dev;
3074 	struct mlxsw_sp *mlxsw_sp;
3075 	struct net_device *br_dev;
3076 
3077 	rtnl_lock();
3078 
3079 	if (!netif_running(dev))
3080 		goto out;
3081 	br_dev = netdev_master_upper_dev_get(dev);
3082 	if (!br_dev)
3083 		goto out;
3084 	if (!netif_is_bridge_master(br_dev))
3085 		goto out;
3086 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3087 	if (!mlxsw_sp)
3088 		goto out;
3089 
3090 	switch (switchdev_work->event) {
3091 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3092 		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3093 		break;
3094 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3095 		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3096 		break;
3097 	}
3098 
3099 out:
3100 	rtnl_unlock();
3101 	kfree(switchdev_work);
3102 	dev_put(dev);
3103 }
3104 
3105 static int
3106 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3107 				      switchdev_work,
3108 				      struct switchdev_notifier_info *info)
3109 {
3110 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3111 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3112 	struct vxlan_config *cfg = &vxlan->cfg;
3113 	struct netlink_ext_ack *extack;
3114 
3115 	extack = switchdev_notifier_info_to_extack(info);
3116 	vxlan_fdb_info = container_of(info,
3117 				      struct switchdev_notifier_vxlan_fdb_info,
3118 				      info);
3119 
3120 	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3121 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3122 		return -EOPNOTSUPP;
3123 	}
3124 	if (vxlan_fdb_info->remote_vni != cfg->vni ||
3125 	    vxlan_fdb_info->vni != cfg->vni) {
3126 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3127 		return -EOPNOTSUPP;
3128 	}
3129 	if (vxlan_fdb_info->remote_ifindex) {
3130 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3131 		return -EOPNOTSUPP;
3132 	}
3133 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3134 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3135 		return -EOPNOTSUPP;
3136 	}
3137 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3138 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3139 		return -EOPNOTSUPP;
3140 	}
3141 
3142 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3143 
3144 	return 0;
3145 }
3146 
3147 /* Called under rcu_read_lock() */
3148 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3149 				    unsigned long event, void *ptr)
3150 {
3151 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3152 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
3153 	struct switchdev_notifier_fdb_info *fdb_info;
3154 	struct switchdev_notifier_info *info = ptr;
3155 	struct net_device *br_dev;
3156 	int err;
3157 
3158 	if (event == SWITCHDEV_PORT_ATTR_SET) {
3159 		err = switchdev_handle_port_attr_set(dev, ptr,
3160 						     mlxsw_sp_port_dev_check,
3161 						     mlxsw_sp_port_attr_set);
3162 		return notifier_from_errno(err);
3163 	}
3164 
3165 	/* Tunnel devices are not our uppers, so check their master instead */
3166 	br_dev = netdev_master_upper_dev_get_rcu(dev);
3167 	if (!br_dev)
3168 		return NOTIFY_DONE;
3169 	if (!netif_is_bridge_master(br_dev))
3170 		return NOTIFY_DONE;
3171 	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3172 		return NOTIFY_DONE;
3173 
3174 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3175 	if (!switchdev_work)
3176 		return NOTIFY_BAD;
3177 
3178 	switchdev_work->dev = dev;
3179 	switchdev_work->event = event;
3180 
3181 	switch (event) {
3182 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3183 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3184 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3185 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3186 		fdb_info = container_of(info,
3187 					struct switchdev_notifier_fdb_info,
3188 					info);
3189 		INIT_WORK(&switchdev_work->work,
3190 			  mlxsw_sp_switchdev_bridge_fdb_event_work);
3191 		memcpy(&switchdev_work->fdb_info, ptr,
3192 		       sizeof(switchdev_work->fdb_info));
3193 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3194 		if (!switchdev_work->fdb_info.addr)
3195 			goto err_addr_alloc;
3196 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3197 				fdb_info->addr);
		/* Take a reference on the device. This can be either an
		 * upper device containing the mlxsw_sp_port or the
		 * mlxsw_sp_port itself
		 */
3202 		dev_hold(dev);
3203 		break;
3204 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3205 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3206 		INIT_WORK(&switchdev_work->work,
3207 			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
3208 		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3209 							    info);
3210 		if (err)
3211 			goto err_vxlan_work_prepare;
3212 		dev_hold(dev);
3213 		break;
3214 	default:
3215 		kfree(switchdev_work);
3216 		return NOTIFY_DONE;
3217 	}
3218 
3219 	mlxsw_core_schedule_work(&switchdev_work->work);
3220 
3221 	return NOTIFY_DONE;
3222 
3223 err_vxlan_work_prepare:
3224 err_addr_alloc:
3225 	kfree(switchdev_work);
3226 	return NOTIFY_BAD;
3227 }
3228 
3229 struct notifier_block mlxsw_sp_switchdev_notifier = {
3230 	.notifier_call = mlxsw_sp_switchdev_event,
3231 };
3232 
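/* Handle the addition of a VLAN to a VxLAN device enslaved to a
 * VLAN-aware bridge. Depending on the PVID / egress untagged flags and
 * on the VLAN currently mapped to the VNI, the VNI is mapped to the new
 * VLAN, unmapped or left untouched. The individual cases are detailed
 * below.
 */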
3233 static int
3234 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3235 				  struct mlxsw_sp_bridge_device *bridge_device,
3236 				  const struct net_device *vxlan_dev, u16 vid,
3237 				  bool flag_untagged, bool flag_pvid,
3238 				  struct netlink_ext_ack *extack)
3239 {
3240 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3241 	__be32 vni = vxlan->cfg.vni;
3242 	struct mlxsw_sp_fid *fid;
3243 	u16 old_vid;
3244 	int err;
3245 
3246 	/* We cannot have the same VLAN as PVID and egress untagged on multiple
3247 	 * VxLAN devices. Note that we get this notification before the VLAN is
3248 	 * actually added to the bridge's database, so it is not possible for
3249 	 * the lookup function to return 'vxlan_dev'
3250 	 */
3251 	if (flag_untagged && flag_pvid &&
3252 	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3253 		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3254 		return -EINVAL;
3255 	}
3256 
3257 	if (!netif_running(vxlan_dev))
3258 		return 0;
3259 
3260 	/* First case: FID is not associated with this VNI, but the new VLAN
3261 	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3262 	 * it exists
3263 	 */
3264 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3265 	if (!fid) {
3266 		if (!flag_untagged || !flag_pvid)
3267 			return 0;
3268 		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
3269 						      vid, extack);
3270 	}
3271 
3272 	/* Second case: FID is associated with the VNI and the VLAN associated
3273 	 * with the FID is the same as the notified VLAN. This means the flags
3274 	 * (PVID / egress untagged) were toggled and that NVE should be
3275 	 * disabled on the FID
3276 	 */
3277 	old_vid = mlxsw_sp_fid_8021q_vid(fid);
3278 	if (vid == old_vid) {
3279 		if (WARN_ON(flag_untagged && flag_pvid)) {
3280 			mlxsw_sp_fid_put(fid);
3281 			return -EINVAL;
3282 		}
3283 		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3284 		mlxsw_sp_fid_put(fid);
3285 		return 0;
3286 	}
3287 
3288 	/* Third case: A new VLAN was configured on the VxLAN device, but this
3289 	 * VLAN is not PVID, so there is nothing to do.
3290 	 */
3291 	if (!flag_pvid) {
3292 		mlxsw_sp_fid_put(fid);
3293 		return 0;
3294 	}
3295 
	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
3297 	 * mapped to the VNI should be unmapped
3298 	 */
3299 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3300 	mlxsw_sp_fid_put(fid);
3301 
3302 	/* Fifth case: The new VLAN is also egress untagged, which means the
3303 	 * VLAN needs to be mapped to the VNI
3304 	 */
3305 	if (!flag_untagged)
3306 		return 0;
3307 
3308 	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
3309 	if (err)
3310 		goto err_vxlan_join;
3311 
3312 	return 0;
3313 
3314 err_vxlan_join:
3315 	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
3316 	return err;
3317 }
3318 
3319 static void
3320 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3321 				  struct mlxsw_sp_bridge_device *bridge_device,
3322 				  const struct net_device *vxlan_dev, u16 vid)
3323 {
3324 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3325 	__be32 vni = vxlan->cfg.vni;
3326 	struct mlxsw_sp_fid *fid;
3327 
3328 	if (!netif_running(vxlan_dev))
3329 		return;
3330 
3331 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3332 	if (!fid)
3333 		return;
3334 
3335 	/* A different VLAN than the one mapped to the VNI is deleted */
3336 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3337 		goto out;
3338 
3339 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3340 
3341 out:
3342 	mlxsw_sp_fid_put(fid);
3343 }
3344 
3345 static int
3346 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3347 				   struct switchdev_notifier_port_obj_info *
3348 				   port_obj_info)
3349 {
3350 	struct switchdev_obj_port_vlan *vlan =
3351 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3352 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3353 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3354 	struct mlxsw_sp_bridge_device *bridge_device;
3355 	struct netlink_ext_ack *extack;
3356 	struct mlxsw_sp *mlxsw_sp;
3357 	struct net_device *br_dev;
3358 
3359 	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3360 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3361 	if (!br_dev)
3362 		return 0;
3363 
3364 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3365 	if (!mlxsw_sp)
3366 		return 0;
3367 
3368 	port_obj_info->handled = true;
3369 
3370 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3371 	if (!bridge_device)
3372 		return -EINVAL;
3373 
3374 	if (!bridge_device->vlan_enabled)
3375 		return 0;
3376 
3377 	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3378 						 vxlan_dev, vlan->vid,
3379 						 flag_untagged,
3380 						 flag_pvid, extack);
3381 }
3382 
3383 static void
3384 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3385 				   struct switchdev_notifier_port_obj_info *
3386 				   port_obj_info)
3387 {
3388 	struct switchdev_obj_port_vlan *vlan =
3389 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3390 	struct mlxsw_sp_bridge_device *bridge_device;
3391 	struct mlxsw_sp *mlxsw_sp;
3392 	struct net_device *br_dev;
3393 
3394 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3395 	if (!br_dev)
3396 		return;
3397 
3398 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3399 	if (!mlxsw_sp)
3400 		return;
3401 
3402 	port_obj_info->handled = true;
3403 
3404 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3405 	if (!bridge_device)
3406 		return;
3407 
3408 	if (!bridge_device->vlan_enabled)
3409 		return;
3410 
3411 	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3412 					  vlan->vid);
3413 }
3414 
3415 static int
3416 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3417 					struct switchdev_notifier_port_obj_info *
3418 					port_obj_info)
3419 {
3420 	int err = 0;
3421 
3422 	switch (port_obj_info->obj->id) {
3423 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3424 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3425 							 port_obj_info);
3426 		break;
3427 	default:
3428 		break;
3429 	}
3430 
3431 	return err;
3432 }
3433 
3434 static void
3435 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3436 					struct switchdev_notifier_port_obj_info *
3437 					port_obj_info)
3438 {
3439 	switch (port_obj_info->obj->id) {
3440 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3441 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3442 		break;
3443 	default:
3444 		break;
3445 	}
3446 }
3447 
3448 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3449 					     unsigned long event, void *ptr)
3450 {
3451 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3452 	int err = 0;
3453 
3454 	switch (event) {
3455 	case SWITCHDEV_PORT_OBJ_ADD:
3456 		if (netif_is_vxlan(dev))
3457 			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3458 		else
3459 			err = switchdev_handle_port_obj_add(dev, ptr,
3460 							mlxsw_sp_port_dev_check,
3461 							mlxsw_sp_port_obj_add);
3462 		return notifier_from_errno(err);
3463 	case SWITCHDEV_PORT_OBJ_DEL:
3464 		if (netif_is_vxlan(dev))
3465 			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3466 		else
3467 			err = switchdev_handle_port_obj_del(dev, ptr,
3468 							mlxsw_sp_port_dev_check,
3469 							mlxsw_sp_port_obj_del);
3470 		return notifier_from_errno(err);
3471 	case SWITCHDEV_PORT_ATTR_SET:
3472 		err = switchdev_handle_port_attr_set(dev, ptr,
3473 						     mlxsw_sp_port_dev_check,
3474 						     mlxsw_sp_port_attr_set);
3475 		return notifier_from_errno(err);
3476 	}
3477 
3478 	return NOTIFY_DONE;
3479 }
3480 
3481 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3482 	.notifier_call = mlxsw_sp_switchdev_blocking_event,
3483 };
3484 
3485 u8
3486 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3487 {
3488 	return bridge_port->stp_state;
3489 }
3490 
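/* Set the default ageing time, register the switchdev notifiers and arm
 * the delayed work used to poll the device for FDB notifications.
 */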
3491 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3492 {
3493 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3494 	struct notifier_block *nb;
3495 	int err;
3496 
3497 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3498 	if (err) {
3499 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3500 		return err;
3501 	}
3502 
3503 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3504 	if (err) {
3505 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3506 		return err;
3507 	}
3508 
3509 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3510 	err = register_switchdev_blocking_notifier(nb);
3511 	if (err) {
3512 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3513 		goto err_register_switchdev_blocking_notifier;
3514 	}
3515 
3516 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3517 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3518 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
3519 	return 0;
3520 
3521 err_register_switchdev_blocking_notifier:
3522 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3523 	return err;
3524 }
3525 
3526 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3527 {
3528 	struct notifier_block *nb;
3529 
3530 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3531 
3532 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3533 	unregister_switchdev_blocking_notifier(nb);
3534 
3535 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3536 }
3537 
3538 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3539 {
3540 	struct mlxsw_sp_bridge *bridge;
3541 
3542 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3543 	if (!bridge)
3544 		return -ENOMEM;
3545 	mlxsw_sp->bridge = bridge;
3546 	bridge->mlxsw_sp = mlxsw_sp;
3547 
3548 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3549 
3550 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3551 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3552 	bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops;
3553 
3554 	return mlxsw_sp_fdb_init(mlxsw_sp);
3555 }
3556 
3557 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3558 {
3559 	mlxsw_sp_fdb_fini(mlxsw_sp);
3560 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3561 	kfree(mlxsw_sp->bridge);
3562 }
3563 
3564