1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
28 struct mlxsw_sp_bridge {
29 	struct mlxsw_sp *mlxsw_sp;
30 	struct {
31 		struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 		unsigned int interval; /* ms */
34 	} fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 	u32 ageing_time;
39 	bool vlan_enabled_exists;
40 	struct list_head bridges_list;
41 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
45 };
46 
47 struct mlxsw_sp_bridge_device {
48 	struct net_device *dev;
49 	struct list_head list;
50 	struct list_head ports_list;
51 	struct list_head mids_list;
52 	u8 vlan_enabled:1,
53 	   multicast_enabled:1,
54 	   mrouter:1;
55 	const struct mlxsw_sp_bridge_ops *ops;
56 };
57 
58 struct mlxsw_sp_bridge_port {
59 	struct net_device *dev;
60 	struct mlxsw_sp_bridge_device *bridge_device;
61 	struct list_head list;
62 	struct list_head vlans_list;
63 	unsigned int ref_count;
64 	u8 stp_state;
65 	unsigned long flags;
66 	bool mrouter;
67 	bool lagged;
68 	union {
69 		u16 lag_id;
70 		u16 system_port;
71 	};
72 };
73 
74 struct mlxsw_sp_bridge_vlan {
75 	struct list_head list;
76 	struct list_head port_vlan_list;
77 	u16 vid;
78 };
79 
80 struct mlxsw_sp_bridge_ops {
81 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
82 			 struct mlxsw_sp_bridge_port *bridge_port,
83 			 struct mlxsw_sp_port *mlxsw_sp_port,
84 			 struct netlink_ext_ack *extack);
85 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
86 			   struct mlxsw_sp_bridge_port *bridge_port,
87 			   struct mlxsw_sp_port *mlxsw_sp_port);
88 	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
89 			  const struct net_device *vxlan_dev, u16 vid,
90 			  struct netlink_ext_ack *extack);
91 	struct mlxsw_sp_fid *
92 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
93 			   u16 vid, struct netlink_ext_ack *extack);
94 	struct mlxsw_sp_fid *
95 		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
96 			      u16 vid);
97 	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
98 		       const struct mlxsw_sp_fid *fid);
99 };
100 
101 static int
102 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
103 			       struct mlxsw_sp_bridge_port *bridge_port,
104 			       u16 fid_index);
105 
106 static void
107 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
108 			       struct mlxsw_sp_bridge_port *bridge_port);
109 
110 static void
111 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
112 				   struct mlxsw_sp_bridge_device
113 				   *bridge_device);
114 
115 static void
116 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
117 				 struct mlxsw_sp_bridge_port *bridge_port,
118 				 bool add);
119 
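/* Look up the driver's tracking structure for an offloaded bridge device.
 * Returns NULL if the bridge is not offloaded by this driver.
 */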
120 static struct mlxsw_sp_bridge_device *
121 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
122 			    const struct net_device *br_dev)
123 {
124 	struct mlxsw_sp_bridge_device *bridge_device;
125 
126 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
127 		if (bridge_device->dev == br_dev)
128 			return bridge_device;
129 
130 	return NULL;
131 }
132 
133 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
134 					 const struct net_device *br_dev)
135 {
136 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
137 }
138 
139 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
140 						    struct netdev_nested_priv *priv)
141 {
142 	struct mlxsw_sp *mlxsw_sp = priv->data;
143 
144 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
145 	return 0;
146 }
147 
148 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
149 						struct net_device *dev)
150 {
151 	struct netdev_nested_priv priv = {
152 		.data = (void *)mlxsw_sp,
153 	};
154 
155 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
156 	netdev_walk_all_upper_dev_rcu(dev,
157 				      mlxsw_sp_bridge_device_upper_rif_destroy,
158 				      &priv);
159 }
160 
161 static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
162 					     struct net_device *br_dev,
163 					     struct netlink_ext_ack *extack)
164 {
165 	struct net_device *dev, *stop_dev;
166 	struct list_head *iter;
167 	int err;
168 
169 	netdev_for_each_lower_dev(br_dev, dev, iter) {
170 		if (netif_is_vxlan(dev) && netif_running(dev)) {
171 			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
172 							 br_dev, dev, 0,
173 							 extack);
174 			if (err) {
175 				stop_dev = dev;
176 				goto err_vxlan_join;
177 			}
178 		}
179 	}
180 
181 	return 0;
182 
183 err_vxlan_join:
184 	netdev_for_each_lower_dev(br_dev, dev, iter) {
185 		if (netif_is_vxlan(dev) && netif_running(dev)) {
186 			if (stop_dev == dev)
187 				break;
188 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
189 		}
190 	}
191 	return err;
192 }
193 
194 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
195 					      struct net_device *br_dev)
196 {
197 	struct net_device *dev;
198 	struct list_head *iter;
199 
200 	netdev_for_each_lower_dev(br_dev, dev, iter) {
201 		if (netif_is_vxlan(dev) && netif_running(dev))
202 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
203 	}
204 }
205 
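/* Create the tracking structure for a newly offloaded bridge. Only a single
 * VLAN-aware bridge is supported. The bridge ops are selected by bridge type:
 * 802.1Q or 802.1AD for VLAN-aware bridges (depending on the VLAN protocol),
 * 802.1D otherwise. Already-enslaved VXLAN devices are replayed afterwards.
 */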
206 static struct mlxsw_sp_bridge_device *
207 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
208 			      struct net_device *br_dev,
209 			      struct netlink_ext_ack *extack)
210 {
211 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
212 	struct mlxsw_sp_bridge_device *bridge_device;
213 	bool vlan_enabled = br_vlan_enabled(br_dev);
214 	int err;
215 
216 	if (vlan_enabled && bridge->vlan_enabled_exists) {
217 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
218 		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
219 		return ERR_PTR(-EINVAL);
220 	}
221 
222 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
223 	if (!bridge_device)
224 		return ERR_PTR(-ENOMEM);
225 
226 	bridge_device->dev = br_dev;
227 	bridge_device->vlan_enabled = vlan_enabled;
228 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
229 	bridge_device->mrouter = br_multicast_router(br_dev);
230 	INIT_LIST_HEAD(&bridge_device->ports_list);
231 	if (vlan_enabled) {
232 		u16 proto;
233 
234 		bridge->vlan_enabled_exists = true;
235 		br_vlan_get_proto(br_dev, &proto);
236 		if (proto == ETH_P_8021AD)
237 			bridge_device->ops = bridge->bridge_8021ad_ops;
238 		else
239 			bridge_device->ops = bridge->bridge_8021q_ops;
240 	} else {
241 		bridge_device->ops = bridge->bridge_8021d_ops;
242 	}
243 	INIT_LIST_HEAD(&bridge_device->mids_list);
244 	list_add(&bridge_device->list, &bridge->bridges_list);
245 
246 	/* It is possible we already have VXLAN devices enslaved to the
247 	 * bridge, in which case we need to replay their configuration as if
248 	 * they were just now enslaved to the bridge.
249 	 */
250 	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
251 	if (err)
252 		goto err_vxlan_init;
253 
254 	return bridge_device;
255 
256 err_vxlan_init:
257 	list_del(&bridge_device->list);
258 	if (bridge_device->vlan_enabled)
259 		bridge->vlan_enabled_exists = false;
260 	kfree(bridge_device);
261 	return ERR_PTR(err);
262 }
263 
264 static void
265 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
266 			       struct mlxsw_sp_bridge_device *bridge_device)
267 {
268 	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
269 	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
270 					    bridge_device->dev);
271 	list_del(&bridge_device->list);
272 	if (bridge_device->vlan_enabled)
273 		bridge->vlan_enabled_exists = false;
274 	WARN_ON(!list_empty(&bridge_device->ports_list));
275 	WARN_ON(!list_empty(&bridge_device->mids_list));
276 	kfree(bridge_device);
277 }
278 
279 static struct mlxsw_sp_bridge_device *
280 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
281 			   struct net_device *br_dev,
282 			   struct netlink_ext_ack *extack)
283 {
284 	struct mlxsw_sp_bridge_device *bridge_device;
285 
286 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
287 	if (bridge_device)
288 		return bridge_device;
289 
290 	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
291 }
292 
293 static void
294 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
295 			   struct mlxsw_sp_bridge_device *bridge_device)
296 {
297 	if (list_empty(&bridge_device->ports_list))
298 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
299 }
300 
301 static struct mlxsw_sp_bridge_port *
302 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
303 			    const struct net_device *brport_dev)
304 {
305 	struct mlxsw_sp_bridge_port *bridge_port;
306 
307 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
308 		if (bridge_port->dev == brport_dev)
309 			return bridge_port;
310 	}
311 
312 	return NULL;
313 }
314 
315 struct mlxsw_sp_bridge_port *
316 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
317 			  struct net_device *brport_dev)
318 {
319 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
320 	struct mlxsw_sp_bridge_device *bridge_device;
321 
322 	if (!br_dev)
323 		return NULL;
324 
325 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
326 	if (!bridge_device)
327 		return NULL;
328 
329 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
330 }
331 
332 static struct mlxsw_sp_bridge_port *
333 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
334 			    struct net_device *brport_dev)
335 {
336 	struct mlxsw_sp_bridge_port *bridge_port;
337 	struct mlxsw_sp_port *mlxsw_sp_port;
338 
339 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
340 	if (!bridge_port)
341 		return NULL;
342 
343 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
344 	bridge_port->lagged = mlxsw_sp_port->lagged;
345 	if (bridge_port->lagged)
346 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
347 	else
348 		bridge_port->system_port = mlxsw_sp_port->local_port;
349 	bridge_port->dev = brport_dev;
350 	bridge_port->bridge_device = bridge_device;
351 	bridge_port->stp_state = BR_STATE_DISABLED;
352 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
353 			     BR_MCAST_FLOOD;
354 	INIT_LIST_HEAD(&bridge_port->vlans_list);
355 	list_add(&bridge_port->list, &bridge_device->ports_list);
356 	bridge_port->ref_count = 1;
357 
358 	return bridge_port;
359 }
360 
361 static void
362 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
363 {
364 	list_del(&bridge_port->list);
365 	WARN_ON(!list_empty(&bridge_port->vlans_list));
366 	kfree(bridge_port);
367 }
368 
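/* Get a reference on the bridge port tracking structure, creating it (and,
 * if needed, its bridge device) on first use. Balanced by
 * mlxsw_sp_bridge_port_put().
 */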
369 static struct mlxsw_sp_bridge_port *
370 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
371 			 struct net_device *brport_dev,
372 			 struct netlink_ext_ack *extack)
373 {
374 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
375 	struct mlxsw_sp_bridge_device *bridge_device;
376 	struct mlxsw_sp_bridge_port *bridge_port;
377 	int err;
378 
379 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
380 	if (bridge_port) {
381 		bridge_port->ref_count++;
382 		return bridge_port;
383 	}
384 
385 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
386 	if (IS_ERR(bridge_device))
387 		return ERR_CAST(bridge_device);
388 
389 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
390 	if (!bridge_port) {
391 		err = -ENOMEM;
392 		goto err_bridge_port_create;
393 	}
394 
395 	return bridge_port;
396 
397 err_bridge_port_create:
398 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
399 	return ERR_PTR(err);
400 }
401 
402 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
403 				     struct mlxsw_sp_bridge_port *bridge_port)
404 {
405 	struct mlxsw_sp_bridge_device *bridge_device;
406 
407 	if (--bridge_port->ref_count != 0)
408 		return;
409 	bridge_device = bridge_port->bridge_device;
410 	mlxsw_sp_bridge_port_destroy(bridge_port);
411 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
412 }
413 
414 static struct mlxsw_sp_port_vlan *
415 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
416 				  const struct mlxsw_sp_bridge_device *
417 				  bridge_device,
418 				  u16 vid)
419 {
420 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
421 
422 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
423 			    list) {
424 		if (!mlxsw_sp_port_vlan->bridge_port)
425 			continue;
426 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
427 		    bridge_device)
428 			continue;
429 		if (bridge_device->vlan_enabled &&
430 		    mlxsw_sp_port_vlan->vid != vid)
431 			continue;
432 		return mlxsw_sp_port_vlan;
433 	}
434 
435 	return NULL;
436 }
437 
438 static struct mlxsw_sp_port_vlan *
439 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
440 			       u16 fid_index)
441 {
442 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
443 
444 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
445 			    list) {
446 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
447 
448 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
449 			return mlxsw_sp_port_vlan;
450 	}
451 
452 	return NULL;
453 }
454 
455 static struct mlxsw_sp_bridge_vlan *
456 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
457 			  u16 vid)
458 {
459 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
460 
461 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
462 		if (bridge_vlan->vid == vid)
463 			return bridge_vlan;
464 	}
465 
466 	return NULL;
467 }
468 
469 static struct mlxsw_sp_bridge_vlan *
470 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
471 {
472 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
473 
474 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
475 	if (!bridge_vlan)
476 		return NULL;
477 
478 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
479 	bridge_vlan->vid = vid;
480 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
481 
482 	return bridge_vlan;
483 }
484 
485 static void
486 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
487 {
488 	list_del(&bridge_vlan->list);
489 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
490 	kfree(bridge_vlan);
491 }
492 
493 static struct mlxsw_sp_bridge_vlan *
494 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
495 {
496 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
497 
498 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
499 	if (bridge_vlan)
500 		return bridge_vlan;
501 
502 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
503 }
504 
505 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
506 {
507 	if (list_empty(&bridge_vlan->port_vlan_list))
508 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
509 }
510 
511 static int
512 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
513 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
514 				  u8 state)
515 {
516 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
517 
518 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
519 			    bridge_vlan_node) {
520 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
521 			continue;
522 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
523 						 bridge_vlan->vid, state);
524 	}
525 
526 	return 0;
527 }
528 
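/* Apply a new STP state to every VLAN the port has behind this bridge port,
 * rolling back to the previous state on failure.
 */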
529 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
530 					    struct net_device *orig_dev,
531 					    u8 state)
532 {
533 	struct mlxsw_sp_bridge_port *bridge_port;
534 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
535 	int err;
536 
537 	/* It's possible we failed to enslave the port, yet this
538 	 * operation is executed due to it being deferred.
539 	 */
540 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
541 						orig_dev);
542 	if (!bridge_port)
543 		return 0;
544 
545 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
546 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
547 							bridge_vlan, state);
548 		if (err)
549 			goto err_port_bridge_vlan_stp_set;
550 	}
551 
552 	bridge_port->stp_state = state;
553 
554 	return 0;
555 
556 err_port_bridge_vlan_stp_set:
557 	list_for_each_entry_continue_reverse(bridge_vlan,
558 					     &bridge_port->vlans_list, list)
559 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
560 						  bridge_port->stp_state);
561 	return err;
562 }
563 
564 static int
565 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
566 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
567 				    enum mlxsw_sp_flood_type packet_type,
568 				    bool member)
569 {
570 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
571 
572 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
573 			    bridge_vlan_node) {
574 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
575 			continue;
576 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
577 					      packet_type,
578 					      mlxsw_sp_port->local_port,
579 					      member);
580 	}
581 
582 	return 0;
583 }
584 
585 static int
586 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
587 				     struct mlxsw_sp_bridge_port *bridge_port,
588 				     enum mlxsw_sp_flood_type packet_type,
589 				     bool member)
590 {
591 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
592 	int err;
593 
594 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
595 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
596 							  bridge_vlan,
597 							  packet_type,
598 							  member);
599 		if (err)
600 			goto err_port_bridge_vlan_flood_set;
601 	}
602 
603 	return 0;
604 
605 err_port_bridge_vlan_flood_set:
606 	list_for_each_entry_continue_reverse(bridge_vlan,
607 					     &bridge_port->vlans_list, list)
608 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
609 						    packet_type, !member);
610 	return err;
611 }
612 
613 static int
614 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
615 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
616 				       bool set)
617 {
618 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
619 	u16 vid = bridge_vlan->vid;
620 
621 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
622 			    bridge_vlan_node) {
623 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
624 			continue;
625 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
626 	}
627 
628 	return 0;
629 }
630 
631 static int
632 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
633 				  struct mlxsw_sp_bridge_port *bridge_port,
634 				  bool set)
635 {
636 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
637 	int err;
638 
639 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
640 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
641 							     bridge_vlan, set);
642 		if (err)
643 			goto err_port_bridge_vlan_learning_set;
644 	}
645 
646 	return 0;
647 
648 err_port_bridge_vlan_learning_set:
649 	list_for_each_entry_continue_reverse(bridge_vlan,
650 					     &bridge_port->vlans_list, list)
651 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
652 						       bridge_vlan, !set);
653 	return err;
654 }
655 
656 static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
657 					       *mlxsw_sp_port,
658 					       unsigned long brport_flags)
659 {
660 	if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
661 		return -EINVAL;
662 
663 	return 0;
664 }
665 
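/* Sync bridge port flags to the device: unicast flooding and learning always,
 * multicast flooding only while multicast snooping is disabled on the bridge
 * (otherwise it is governed by the mrouter / MDB state).
 */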
666 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
667 					   struct net_device *orig_dev,
668 					   unsigned long brport_flags)
669 {
670 	struct mlxsw_sp_bridge_port *bridge_port;
671 	int err;
672 
673 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
674 						orig_dev);
675 	if (!bridge_port)
676 		return 0;
677 
678 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
679 						   MLXSW_SP_FLOOD_TYPE_UC,
680 						   brport_flags & BR_FLOOD);
681 	if (err)
682 		return err;
683 
684 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
685 						brport_flags & BR_LEARNING);
686 	if (err)
687 		return err;
688 
689 	if (bridge_port->bridge_device->multicast_enabled)
690 		goto out;
691 
692 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
693 						   MLXSW_SP_FLOOD_TYPE_MC,
694 						   brport_flags &
695 						   BR_MCAST_FLOOD);
696 	if (err)
697 		return err;
698 
699 out:
700 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
701 	return 0;
702 }
703 
704 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
705 {
706 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
707 	int err;
708 
709 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
710 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
711 	if (err)
712 		return err;
713 	mlxsw_sp->bridge->ageing_time = ageing_time;
714 	return 0;
715 }
716 
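/* The ageing time is passed in clock_t units; convert it to seconds and
 * validate it against the device's supported range before writing SFDAT.
 */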
717 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
718 					    unsigned long ageing_clock_t)
719 {
720 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
721 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
722 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
723 
724 	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
725 	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
726 		return -ERANGE;
727 
728 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
729 }
730 
731 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
732 					  struct net_device *orig_dev,
733 					  bool vlan_enabled)
734 {
735 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
736 	struct mlxsw_sp_bridge_device *bridge_device;
737 
738 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
739 	if (WARN_ON(!bridge_device))
740 		return -EINVAL;
741 
742 	if (bridge_device->vlan_enabled == vlan_enabled)
743 		return 0;
744 
745 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
746 	return -EINVAL;
747 }
748 
749 static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
750 						struct net_device *orig_dev,
751 						u16 vlan_proto)
752 {
753 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
754 	struct mlxsw_sp_bridge_device *bridge_device;
755 
756 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
757 	if (WARN_ON(!bridge_device))
758 		return -EINVAL;
759 
760 	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
761 	return -EINVAL;
762 }
763 
764 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
765 					  struct net_device *orig_dev,
766 					  bool is_port_mrouter)
767 {
768 	struct mlxsw_sp_bridge_port *bridge_port;
769 	int err;
770 
771 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
772 						orig_dev);
773 	if (!bridge_port)
774 		return 0;
775 
776 	if (!bridge_port->bridge_device->multicast_enabled)
777 		goto out;
778 
779 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
780 						   MLXSW_SP_FLOOD_TYPE_MC,
781 						   is_port_mrouter);
782 	if (err)
783 		return err;
784 
785 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
786 					 is_port_mrouter);
787 out:
788 	bridge_port->mrouter = is_port_mrouter;
789 	return 0;
790 }
791 
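/* Decide whether the port should be a member of the multicast flood table:
 * with multicast snooping enabled only mrouter ports are, otherwise the
 * BR_MCAST_FLOOD flag decides.
 */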
792 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
793 {
794 	const struct mlxsw_sp_bridge_device *bridge_device;
795 
796 	bridge_device = bridge_port->bridge_device;
797 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
798 					bridge_port->flags & BR_MCAST_FLOOD;
799 }
800 
801 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
802 					 struct net_device *orig_dev,
803 					 bool mc_disabled)
804 {
805 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
806 	struct mlxsw_sp_bridge_device *bridge_device;
807 	struct mlxsw_sp_bridge_port *bridge_port;
808 	int err;
809 
810 	/* It's possible we failed to enslave the port, yet this
811 	 * operation is executed due to it being deferred.
812 	 */
813 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
814 	if (!bridge_device)
815 		return 0;
816 
817 	if (bridge_device->multicast_enabled != !mc_disabled) {
818 		bridge_device->multicast_enabled = !mc_disabled;
819 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
820 						   bridge_device);
821 	}
822 
823 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
824 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
825 		bool member = mlxsw_sp_mc_flood(bridge_port);
826 
827 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
828 							   bridge_port,
829 							   packet_type, member);
830 		if (err)
831 			return err;
832 	}
833 
834 	bridge_device->multicast_enabled = !mc_disabled;
835 
836 	return 0;
837 }
838 
839 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
840 					 u16 mid_idx, bool add)
841 {
842 	char *smid_pl;
843 	int err;
844 
845 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
846 	if (!smid_pl)
847 		return -ENOMEM;
848 
849 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
850 			    mlxsw_sp_router_port(mlxsw_sp), add);
851 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
852 	kfree(smid_pl);
853 	return err;
854 }
855 
856 static void
857 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
858 				   struct mlxsw_sp_bridge_device *bridge_device,
859 				   bool add)
860 {
861 	struct mlxsw_sp_mid *mid;
862 
863 	list_for_each_entry(mid, &bridge_device->mids_list, list)
864 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
865 }
866 
867 static int
868 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
869 				  struct net_device *orig_dev,
870 				  bool is_mrouter)
871 {
872 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
873 	struct mlxsw_sp_bridge_device *bridge_device;
874 
875 	/* It's possible we failed to enslave the port, yet this
876 	 * operation is executed due to it being deferred.
877 	 */
878 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
879 	if (!bridge_device)
880 		return 0;
881 
882 	if (bridge_device->mrouter != is_mrouter)
883 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
884 						   is_mrouter);
885 	bridge_device->mrouter = is_mrouter;
886 	return 0;
887 }
888 
889 static int mlxsw_sp_port_attr_set(struct net_device *dev,
890 				  const struct switchdev_attr *attr)
891 {
892 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
893 	int err;
894 
895 	switch (attr->id) {
896 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
897 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
898 						       attr->orig_dev,
899 						       attr->u.stp_state);
900 		break;
901 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
902 		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
903 							  attr->u.brport_flags);
904 		break;
905 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
906 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
907 						      attr->orig_dev,
908 						      attr->u.brport_flags);
909 		break;
910 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
911 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
912 						       attr->u.ageing_time);
913 		break;
914 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
915 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
916 						     attr->orig_dev,
917 						     attr->u.vlan_filtering);
918 		break;
919 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
920 		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
921 							   attr->orig_dev,
922 							   attr->u.vlan_protocol);
923 		break;
924 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
925 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
926 						     attr->orig_dev,
927 						     attr->u.mrouter);
928 		break;
929 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
930 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
931 						    attr->orig_dev,
932 						    attr->u.mc_disabled);
933 		break;
934 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
935 		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
936 							attr->orig_dev,
937 							attr->u.mrouter);
938 		break;
939 	default:
940 		err = -EOPNOTSUPP;
941 		break;
942 	}
943 
944 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
945 
946 	return err;
947 }
948 
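/* Join the {port, VID} to the FID provided by the bridge variant: enable the
 * relevant unicast / multicast / broadcast flooding and map the VID to the
 * FID at the port.
 */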
949 static int
950 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
951 			    struct mlxsw_sp_bridge_port *bridge_port,
952 			    struct netlink_ext_ack *extack)
953 {
954 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
955 	struct mlxsw_sp_bridge_device *bridge_device;
956 	u8 local_port = mlxsw_sp_port->local_port;
957 	u16 vid = mlxsw_sp_port_vlan->vid;
958 	struct mlxsw_sp_fid *fid;
959 	int err;
960 
961 	bridge_device = bridge_port->bridge_device;
962 	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
963 	if (IS_ERR(fid))
964 		return PTR_ERR(fid);
965 
966 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
967 				     bridge_port->flags & BR_FLOOD);
968 	if (err)
969 		goto err_fid_uc_flood_set;
970 
971 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
972 				     mlxsw_sp_mc_flood(bridge_port));
973 	if (err)
974 		goto err_fid_mc_flood_set;
975 
976 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
977 				     true);
978 	if (err)
979 		goto err_fid_bc_flood_set;
980 
981 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
982 	if (err)
983 		goto err_fid_port_vid_map;
984 
985 	mlxsw_sp_port_vlan->fid = fid;
986 
987 	return 0;
988 
989 err_fid_port_vid_map:
990 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
991 err_fid_bc_flood_set:
992 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
993 err_fid_mc_flood_set:
994 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
995 err_fid_uc_flood_set:
996 	mlxsw_sp_fid_put(fid);
997 	return err;
998 }
999 
1000 static void
1001 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1002 {
1003 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1004 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1005 	u8 local_port = mlxsw_sp_port->local_port;
1006 	u16 vid = mlxsw_sp_port_vlan->vid;
1007 
1008 	mlxsw_sp_port_vlan->fid = NULL;
1009 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
1010 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1011 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1012 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1013 	mlxsw_sp_fid_put(fid);
1014 }
1015 
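/* Determine the PVID to program after a VLAN change: the new VID if it is
 * flagged as PVID, 0 (no untagged traffic) if the current PVID is being
 * removed, or the current PVID otherwise.
 */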
1016 static u16
1017 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1018 			     u16 vid, bool is_pvid)
1019 {
1020 	if (is_pvid)
1021 		return vid;
1022 	else if (mlxsw_sp_port->pvid == vid)
1023 		return 0;	/* Disallow untagged packets */
1024 	else
1025 		return mlxsw_sp_port->pvid;
1026 }
1027 
1028 static int
1029 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1030 			       struct mlxsw_sp_bridge_port *bridge_port,
1031 			       struct netlink_ext_ack *extack)
1032 {
1033 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1034 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1035 	u16 vid = mlxsw_sp_port_vlan->vid;
1036 	int err;
1037 
1038 	/* No need to continue if only VLAN flags were changed */
1039 	if (mlxsw_sp_port_vlan->bridge_port)
1040 		return 0;
1041 
1042 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
1043 					  extack);
1044 	if (err)
1045 		return err;
1046 
1047 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
1048 					     bridge_port->flags & BR_LEARNING);
1049 	if (err)
1050 		goto err_port_vid_learning_set;
1051 
1052 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1053 					bridge_port->stp_state);
1054 	if (err)
1055 		goto err_port_vid_stp_set;
1056 
1057 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1058 	if (!bridge_vlan) {
1059 		err = -ENOMEM;
1060 		goto err_bridge_vlan_get;
1061 	}
1062 
1063 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1064 		 &bridge_vlan->port_vlan_list);
1065 
1066 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1067 				 bridge_port->dev, extack);
1068 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
1069 
1070 	return 0;
1071 
1072 err_bridge_vlan_get:
1073 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1074 err_port_vid_stp_set:
1075 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1076 err_port_vid_learning_set:
1077 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1078 	return err;
1079 }
1080 
1081 void
1082 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1083 {
1084 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1085 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1086 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1087 	struct mlxsw_sp_bridge_port *bridge_port;
1088 	u16 vid = mlxsw_sp_port_vlan->vid;
1089 	bool last_port, last_vlan;
1090 
1091 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1092 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1093 		return;
1094 
1095 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1096 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1097 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1098 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1099 
1100 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1101 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1102 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1103 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1104 	if (last_port)
1105 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1106 					       bridge_port,
1107 					       mlxsw_sp_fid_index(fid));
1108 	if (last_vlan)
1109 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1110 
1111 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1112 
1113 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1114 	mlxsw_sp_port_vlan->bridge_port = NULL;
1115 }
1116 
1117 static int
1118 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1119 			      struct mlxsw_sp_bridge_port *bridge_port,
1120 			      u16 vid, bool is_untagged, bool is_pvid,
1121 			      struct netlink_ext_ack *extack)
1122 {
1123 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1124 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1125 	u16 old_pvid = mlxsw_sp_port->pvid;
1126 	u16 proto;
1127 	int err;
1128 
1129 	/* The only valid scenario in which a port-vlan already exists is if
1130 	 * the VLAN flags were changed and the port-vlan is associated with the
1131 	 * correct bridge port.
1132 	 */
1133 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1134 	if (mlxsw_sp_port_vlan &&
1135 	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
1136 		return -EEXIST;
1137 
1138 	if (!mlxsw_sp_port_vlan) {
1139 		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1140 							       vid);
1141 		if (IS_ERR(mlxsw_sp_port_vlan))
1142 			return PTR_ERR(mlxsw_sp_port_vlan);
1143 	}
1144 
1145 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1146 				     is_untagged);
1147 	if (err)
1148 		goto err_port_vlan_set;
1149 
1150 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1151 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1152 	if (err)
1153 		goto err_port_pvid_set;
1154 
1155 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
1156 					     extack);
1157 	if (err)
1158 		goto err_port_vlan_bridge_join;
1159 
1160 	return 0;
1161 
1162 err_port_vlan_bridge_join:
1163 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
1164 err_port_pvid_set:
1165 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1166 err_port_vlan_set:
1167 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1168 	return err;
1169 }
1170 
1171 static int
1172 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1173 				const struct net_device *br_dev,
1174 				const struct switchdev_obj_port_vlan *vlan)
1175 {
1176 	u16 pvid;
1177 
1178 	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1179 	if (!pvid)
1180 		return 0;
1181 
1182 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1183 		if (vlan->vid != pvid) {
1184 			netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1185 			return -EBUSY;
1186 		}
1187 	} else {
1188 		if (vlan->vid == pvid) {
1189 			netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1190 			return -EBUSY;
1191 		}
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1198 				   const struct switchdev_obj_port_vlan *vlan,
1199 				   struct netlink_ext_ack *extack)
1200 {
1201 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1202 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1203 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1204 	struct net_device *orig_dev = vlan->obj.orig_dev;
1205 	struct mlxsw_sp_bridge_port *bridge_port;
1206 
1207 	if (netif_is_bridge_master(orig_dev)) {
1208 		int err = 0;
1209 
1210 		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1211 		    br_vlan_enabled(orig_dev))
1212 			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1213 							      orig_dev, vlan);
1214 		if (!err)
1215 			err = -EOPNOTSUPP;
1216 		return err;
1217 	}
1218 
1219 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1220 	if (WARN_ON(!bridge_port))
1221 		return -EINVAL;
1222 
1223 	if (!bridge_port->bridge_device->vlan_enabled)
1224 		return 0;
1225 
1226 	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1227 					     vlan->vid, flag_untagged,
1228 					     flag_pvid, extack);
1229 }
1230 
1231 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1232 {
1233 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1234 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1235 }
1236 
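/* Flush the FDB entries learned for this bridge port on the given FID, using
 * a per-port or per-LAG flush depending on how the port joined the bridge.
 */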
1237 static int
1238 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1239 			       struct mlxsw_sp_bridge_port *bridge_port,
1240 			       u16 fid_index)
1241 {
1242 	bool lagged = bridge_port->lagged;
1243 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1244 	u16 system_port;
1245 
1246 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1247 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1248 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1249 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1250 
1251 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1252 }
1253 
1254 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1255 {
1256 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1257 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1258 }
1259 
1260 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1261 {
1262 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1263 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1264 }
1265 
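/* Add or remove a unicast FDB record that forwards to a (VXLAN) tunnel with
 * the given underlay address. Only IPv4 underlays are supported; IPv6
 * requests are rejected with -EOPNOTSUPP.
 */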
1266 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1267 					  const char *mac, u16 fid,
1268 					  enum mlxsw_sp_l3proto proto,
1269 					  const union mlxsw_sp_l3addr *addr,
1270 					  bool adding, bool dynamic)
1271 {
1272 	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
1273 	char *sfd_pl;
1274 	u8 num_rec;
1275 	u32 uip;
1276 	int err;
1277 
1278 	switch (proto) {
1279 	case MLXSW_SP_L3_PROTO_IPV4:
1280 		uip = be32_to_cpu(addr->addr4);
1281 		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
1282 		break;
1283 	case MLXSW_SP_L3_PROTO_IPV6:
1284 	default:
1285 		WARN_ON(1);
1286 		return -EOPNOTSUPP;
1287 	}
1288 
1289 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1290 	if (!sfd_pl)
1291 		return -ENOMEM;
1292 
1293 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1294 	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
1295 				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
1296 				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
1297 				     sfd_proto);
1298 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1299 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1300 	if (err)
1301 		goto out;
1302 
1303 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1304 		err = -EBUSY;
1305 
1306 out:
1307 	kfree(sfd_pl);
1308 	return err;
1309 }
1310 
1311 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1312 				     const char *mac, u16 fid, bool adding,
1313 				     enum mlxsw_reg_sfd_rec_action action,
1314 				     enum mlxsw_reg_sfd_rec_policy policy)
1315 {
1316 	char *sfd_pl;
1317 	u8 num_rec;
1318 	int err;
1319 
1320 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1321 	if (!sfd_pl)
1322 		return -ENOMEM;
1323 
1324 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1325 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1326 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1327 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1328 	if (err)
1329 		goto out;
1330 
1331 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1332 		err = -EBUSY;
1333 
1334 out:
1335 	kfree(sfd_pl);
1336 	return err;
1337 }
1338 
1339 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1340 				   const char *mac, u16 fid, bool adding,
1341 				   bool dynamic)
1342 {
1343 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1344 					 MLXSW_REG_SFD_REC_ACTION_NOP,
1345 					 mlxsw_sp_sfd_rec_policy(dynamic));
1346 }
1347 
1348 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1349 			bool adding)
1350 {
1351 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1352 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1353 					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1354 }
1355 
1356 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1357 				       const char *mac, u16 fid, u16 lag_vid,
1358 				       bool adding, bool dynamic)
1359 {
1360 	char *sfd_pl;
1361 	u8 num_rec;
1362 	int err;
1363 
1364 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1365 	if (!sfd_pl)
1366 		return -ENOMEM;
1367 
1368 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1369 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1370 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1371 				  lag_vid, lag_id);
1372 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1373 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1374 	if (err)
1375 		goto out;
1376 
1377 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1378 		err = -EBUSY;
1379 
1380 out:
1381 	kfree(sfd_pl);
1382 	return err;
1383 }
1384 
1385 static int
1386 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1387 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1388 {
1389 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1390 	struct net_device *orig_dev = fdb_info->info.dev;
1391 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1392 	struct mlxsw_sp_bridge_device *bridge_device;
1393 	struct mlxsw_sp_bridge_port *bridge_port;
1394 	u16 fid_index, vid;
1395 
1396 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1397 	if (!bridge_port)
1398 		return -EINVAL;
1399 
1400 	bridge_device = bridge_port->bridge_device;
1401 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1402 							       bridge_device,
1403 							       fdb_info->vid);
1404 	if (!mlxsw_sp_port_vlan)
1405 		return 0;
1406 
1407 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1408 	vid = mlxsw_sp_port_vlan->vid;
1409 
1410 	if (!bridge_port->lagged)
1411 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1412 					       bridge_port->system_port,
1413 					       fdb_info->addr, fid_index,
1414 					       adding, false);
1415 	else
1416 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1417 						   bridge_port->lag_id,
1418 						   fdb_info->addr, fid_index,
1419 						   vid, adding, false);
1420 }
1421 
1422 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1423 				u16 fid, u16 mid_idx, bool adding)
1424 {
1425 	char *sfd_pl;
1426 	u8 num_rec;
1427 	int err;
1428 
1429 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1430 	if (!sfd_pl)
1431 		return -ENOMEM;
1432 
1433 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1434 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1435 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1436 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1437 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1438 	if (err)
1439 		goto out;
1440 
1441 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1442 		err = -EBUSY;
1443 
1444 out:
1445 	kfree(sfd_pl);
1446 	return err;
1447 }
1448 
1449 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1450 					 long *ports_bitmap,
1451 					 bool set_router_port)
1452 {
1453 	char *smid_pl;
1454 	int err, i;
1455 
1456 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1457 	if (!smid_pl)
1458 		return -ENOMEM;
1459 
1460 	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1461 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1462 		if (mlxsw_sp->ports[i])
1463 			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1464 	}
1465 
1466 	mlxsw_reg_smid_port_mask_set(smid_pl,
1467 				     mlxsw_sp_router_port(mlxsw_sp), 1);
1468 
1469 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1470 		mlxsw_reg_smid_port_set(smid_pl, i, 1);
1471 
1472 	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1473 				set_router_port);
1474 
1475 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1476 	kfree(smid_pl);
1477 	return err;
1478 }
1479 
1480 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1481 				  u16 mid_idx, bool add)
1482 {
1483 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1484 	char *smid_pl;
1485 	int err;
1486 
1487 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1488 	if (!smid_pl)
1489 		return -ENOMEM;
1490 
1491 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1492 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1493 	kfree(smid_pl);
1494 	return err;
1495 }
1496 
1497 static struct
1498 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1499 				const unsigned char *addr,
1500 				u16 fid)
1501 {
1502 	struct mlxsw_sp_mid *mid;
1503 
1504 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1505 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1506 			return mid;
1507 	}
1508 	return NULL;
1509 }
1510 
1511 static void
1512 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1513 				      struct mlxsw_sp_bridge_port *bridge_port,
1514 				      unsigned long *ports_bitmap)
1515 {
1516 	struct mlxsw_sp_port *mlxsw_sp_port;
1517 	u64 max_lag_members, i;
1518 	int lag_id;
1519 
1520 	if (!bridge_port->lagged) {
1521 		set_bit(bridge_port->system_port, ports_bitmap);
1522 	} else {
1523 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1524 						     MAX_LAG_MEMBERS);
1525 		lag_id = bridge_port->lag_id;
1526 		for (i = 0; i < max_lag_members; i++) {
1527 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1528 								 lag_id, i);
1529 			if (mlxsw_sp_port)
1530 				set_bit(mlxsw_sp_port->local_port,
1531 					ports_bitmap);
1532 		}
1533 	}
1534 }
1535 
1536 static void
1537 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1538 				struct mlxsw_sp_bridge_device *bridge_device,
1539 				struct mlxsw_sp *mlxsw_sp)
1540 {
1541 	struct mlxsw_sp_bridge_port *bridge_port;
1542 
1543 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1544 		if (bridge_port->mrouter) {
1545 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1546 							      bridge_port,
1547 							      flood_bitmap);
1548 		}
1549 	}
1550 }
1551 
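/* Allocate a MID index and program the MDB entry in hardware. The flood
 * bitmap written to SMID is the union of the group's member ports and all
 * mrouter ports of the bridge.
 */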
1552 static bool
1553 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1554 			    struct mlxsw_sp_mid *mid,
1555 			    struct mlxsw_sp_bridge_device *bridge_device)
1556 {
1557 	long *flood_bitmap;
1558 	int num_of_ports;
1559 	int alloc_size;
1560 	u16 mid_idx;
1561 	int err;
1562 
1563 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1564 				      MLXSW_SP_MID_MAX);
1565 	if (mid_idx == MLXSW_SP_MID_MAX)
1566 		return false;
1567 
1568 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1569 	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1570 	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1571 	if (!flood_bitmap)
1572 		return false;
1573 
1574 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1575 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1576 
1577 	mid->mid = mid_idx;
1578 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1579 					    bridge_device->mrouter);
1580 	kfree(flood_bitmap);
1581 	if (err)
1582 		return false;
1583 
1584 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1585 				   true);
1586 	if (err)
1587 		return false;
1588 
1589 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1590 	mid->in_hw = true;
1591 	return true;
1592 }
1593 
1594 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1595 					struct mlxsw_sp_mid *mid)
1596 {
1597 	if (!mid->in_hw)
1598 		return 0;
1599 
1600 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1601 	mid->in_hw = false;
1602 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1603 				    false);
1604 }
1605 
1606 static struct
1607 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1608 				  struct mlxsw_sp_bridge_device *bridge_device,
1609 				  const unsigned char *addr,
1610 				  u16 fid)
1611 {
1612 	struct mlxsw_sp_mid *mid;
1613 	size_t alloc_size;
1614 
1615 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1616 	if (!mid)
1617 		return NULL;
1618 
1619 	alloc_size = sizeof(unsigned long) *
1620 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1621 
1622 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1623 	if (!mid->ports_in_mid)
1624 		goto err_ports_in_mid_alloc;
1625 
1626 	ether_addr_copy(mid->addr, addr);
1627 	mid->fid = fid;
1628 	mid->in_hw = false;
1629 
1630 	if (!bridge_device->multicast_enabled)
1631 		goto out;
1632 
1633 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1634 		goto err_write_mdb_entry;
1635 
1636 out:
1637 	list_add_tail(&mid->list, &bridge_device->mids_list);
1638 	return mid;
1639 
1640 err_write_mdb_entry:
1641 	kfree(mid->ports_in_mid);
1642 err_ports_in_mid_alloc:
1643 	kfree(mid);
1644 	return NULL;
1645 }
1646 
1647 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1648 					 struct mlxsw_sp_mid *mid)
1649 {
1650 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1651 	int err = 0;
1652 
1653 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1654 	if (bitmap_empty(mid->ports_in_mid,
1655 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1656 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1657 		list_del(&mid->list);
1658 		kfree(mid->ports_in_mid);
1659 		kfree(mid);
1660 	}
1661 	return err;
1662 }
1663 
1664 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1665 				 const struct switchdev_obj_port_mdb *mdb)
1666 {
1667 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1668 	struct net_device *orig_dev = mdb->obj.orig_dev;
1669 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1670 	struct net_device *dev = mlxsw_sp_port->dev;
1671 	struct mlxsw_sp_bridge_device *bridge_device;
1672 	struct mlxsw_sp_bridge_port *bridge_port;
1673 	struct mlxsw_sp_mid *mid;
1674 	u16 fid_index;
1675 	int err = 0;
1676 
1677 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1678 	if (!bridge_port)
1679 		return 0;
1680 
1681 	bridge_device = bridge_port->bridge_device;
1682 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1683 							       bridge_device,
1684 							       mdb->vid);
1685 	if (!mlxsw_sp_port_vlan)
1686 		return 0;
1687 
1688 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1689 
1690 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1691 	if (!mid) {
1692 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1693 					  fid_index);
1694 		if (!mid) {
1695 			netdev_err(dev, "Unable to allocate MC group\n");
1696 			return -ENOMEM;
1697 		}
1698 	}
1699 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1700 
1701 	if (!bridge_device->multicast_enabled)
1702 		return 0;
1703 
1704 	if (bridge_port->mrouter)
1705 		return 0;
1706 
1707 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1708 	if (err) {
1709 		netdev_err(dev, "Unable to set SMID\n");
1710 		goto err_out;
1711 	}
1712 
1713 	return 0;
1714 
1715 err_out:
1716 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1717 	return err;
1718 }
1719 
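/* Called when multicast snooping is toggled on the bridge: write all of its
 * MDB entries to hardware when enabling, remove them when disabling.
 */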
1720 static void
1721 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1722 				   struct mlxsw_sp_bridge_device
1723 				   *bridge_device)
1724 {
1725 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1726 	struct mlxsw_sp_mid *mid;
1727 	bool mc_enabled;
1728 
1729 	mc_enabled = bridge_device->multicast_enabled;
1730 
1731 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1732 		if (mc_enabled)
1733 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1734 						    bridge_device);
1735 		else
1736 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1737 	}
1738 }
1739 
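/* An mrouter port needs to receive all multicast traffic: add it to (or
 * remove it from) every MDB entry it is not already a member of.
 */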
1740 static void
1741 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1742 				 struct mlxsw_sp_bridge_port *bridge_port,
1743 				 bool add)
1744 {
1745 	struct mlxsw_sp_bridge_device *bridge_device;
1746 	struct mlxsw_sp_mid *mid;
1747 
1748 	bridge_device = bridge_port->bridge_device;
1749 
1750 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1751 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1752 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1753 	}
1754 }
1755 
1756 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1757 				 const struct switchdev_obj *obj,
1758 				 struct netlink_ext_ack *extack)
1759 {
1760 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1761 	const struct switchdev_obj_port_vlan *vlan;
1762 	int err = 0;
1763 
1764 	switch (obj->id) {
1765 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1766 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1767 
1768 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
1769 
1770 		/* The event is emitted before the changes are actually
1771 		 * applied to the bridge. Therefore schedule the respin
1772 		 * call for later, so that the respin logic sees the
1773 		 * updated bridge state.
1774 		 */
1775 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1776 		break;
1777 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1778 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1779 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1780 		break;
1781 	default:
1782 		err = -EOPNOTSUPP;
1783 		break;
1784 	}
1785 
1786 	return err;
1787 }
1788 
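/* Tear down a VLAN on a bridge port: leave the bridge, clear the PVID if it
 * was the deleted VLAN, remove the VLAN from the port and destroy the
 * {Port, VID} entry.
 */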
1789 static void
1790 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1791 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1792 {
1793 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1794 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1795 	u16 proto;
1796 
1797 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1798 	if (WARN_ON(!mlxsw_sp_port_vlan))
1799 		return;
1800 
1801 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1802 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1803 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1804 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1805 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1806 }
1807 
1808 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1809 				   const struct switchdev_obj_port_vlan *vlan)
1810 {
1811 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1812 	struct net_device *orig_dev = vlan->obj.orig_dev;
1813 	struct mlxsw_sp_bridge_port *bridge_port;
1814 
1815 	if (netif_is_bridge_master(orig_dev))
1816 		return -EOPNOTSUPP;
1817 
1818 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1819 	if (WARN_ON(!bridge_port))
1820 		return -EINVAL;
1821 
1822 	if (!bridge_port->bridge_device->vlan_enabled)
1823 		return 0;
1824 
1825 	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
1826 
1827 	return 0;
1828 }
1829 
1830 static int
1831 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1832 			struct mlxsw_sp_bridge_port *bridge_port,
1833 			struct mlxsw_sp_mid *mid)
1834 {
1835 	struct net_device *dev = mlxsw_sp_port->dev;
1836 	int err;
1837 
1838 	if (bridge_port->bridge_device->multicast_enabled &&
1839 	    !bridge_port->mrouter) {
1840 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1841 		if (err)
1842 			netdev_err(dev, "Unable to remove port from SMID\n");
1843 	}
1844 
1845 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1846 	if (err)
1847 		netdev_err(dev, "Unable to remove MC SFD\n");
1848 
1849 	return err;
1850 }
1851 
1852 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1853 				 const struct switchdev_obj_port_mdb *mdb)
1854 {
1855 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1856 	struct net_device *orig_dev = mdb->obj.orig_dev;
1857 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1858 	struct mlxsw_sp_bridge_device *bridge_device;
1859 	struct net_device *dev = mlxsw_sp_port->dev;
1860 	struct mlxsw_sp_bridge_port *bridge_port;
1861 	struct mlxsw_sp_mid *mid;
1862 	u16 fid_index;
1863 
1864 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1865 	if (!bridge_port)
1866 		return 0;
1867 
1868 	bridge_device = bridge_port->bridge_device;
1869 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1870 							       bridge_device,
1871 							       mdb->vid);
1872 	if (!mlxsw_sp_port_vlan)
1873 		return 0;
1874 
1875 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1876 
1877 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1878 	if (!mid) {
1879 		netdev_err(dev, "Unable to remove port from MC DB\n");
1880 		return -EINVAL;
1881 	}
1882 
1883 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1884 }
1885 
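/* Remove the port from all MDB entries of the bridge. If the port is only a
 * member of an entry by virtue of being a multicast router port, it is only
 * removed from the entry's replication list.
 */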
1886 static void
1887 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1888 			       struct mlxsw_sp_bridge_port *bridge_port)
1889 {
1890 	struct mlxsw_sp_bridge_device *bridge_device;
1891 	struct mlxsw_sp_mid *mid, *tmp;
1892 
1893 	bridge_device = bridge_port->bridge_device;
1894 
1895 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1896 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1897 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1898 						mid);
1899 		} else if (bridge_device->multicast_enabled &&
1900 			   bridge_port->mrouter) {
1901 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1902 		}
1903 	}
1904 }
1905 
1906 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1907 				 const struct switchdev_obj *obj)
1908 {
1909 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1910 	int err = 0;
1911 
1912 	switch (obj->id) {
1913 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1914 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1915 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1916 		break;
1917 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1918 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1919 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1920 		break;
1921 	default:
1922 		err = -EOPNOTSUPP;
1923 		break;
1924 	}
1925 
1926 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1927 
1928 	return err;
1929 }
1930 
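/* Return an arbitrary member port of the LAG to act as its representor. */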
1931 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1932 						   u16 lag_id)
1933 {
1934 	struct mlxsw_sp_port *mlxsw_sp_port;
1935 	u64 max_lag_members;
1936 	int i;
1937 
1938 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1939 					     MAX_LAG_MEMBERS);
1940 	for (i = 0; i < max_lag_members; i++) {
1941 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1942 		if (mlxsw_sp_port)
1943 			return mlxsw_sp_port;
1944 	}
1945 	return NULL;
1946 }
1947 
1948 static int
1949 mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
1950 				     struct mlxsw_sp_port *mlxsw_sp_port,
1951 				     struct netlink_ext_ack *extack)
1952 {
1953 	if (is_vlan_dev(bridge_port->dev)) {
1954 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1955 		return -EINVAL;
1956 	}
1957 
1958 	/* Port is no longer usable as a router interface */
1959 	if (mlxsw_sp_port->default_vlan->fid)
1960 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
1961 
1962 	return 0;
1963 }
1964 
1965 static int
1966 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1967 				struct mlxsw_sp_bridge_port *bridge_port,
1968 				struct mlxsw_sp_port *mlxsw_sp_port,
1969 				struct netlink_ext_ack *extack)
1970 {
1971 	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
1972 						    extack);
1973 }
1974 
1975 static void
1976 mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
1977 {
1978 	/* Make sure untagged frames are allowed to ingress */
1979 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1980 			       ETH_P_8021Q);
1981 }
1982 
1983 static void
1984 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1985 				 struct mlxsw_sp_bridge_port *bridge_port,
1986 				 struct mlxsw_sp_port *mlxsw_sp_port)
1987 {
1988 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
1989 }
1990 
1991 static int
1992 mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
1993 				      const struct net_device *vxlan_dev,
1994 				      u16 vid, u16 ethertype,
1995 				      struct netlink_ext_ack *extack)
1996 {
1997 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1998 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
1999 	struct mlxsw_sp_nve_params params = {
2000 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2001 		.vni = vxlan->cfg.vni,
2002 		.dev = vxlan_dev,
2003 		.ethertype = ethertype,
2004 	};
2005 	struct mlxsw_sp_fid *fid;
2006 	int err;
2007 
2008 	/* If the VLAN is 0, we need to find the VLAN that is configured as
2009 	 * PVID and egress untagged on the bridge port of the VxLAN device.
2010 	 * It is possible that no such VLAN exists.
2011 	 */
2012 	if (!vid) {
2013 		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
2014 		if (err || !vid)
2015 			return err;
2016 	}
2017 
2018 	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2019 	if (IS_ERR(fid)) {
2020 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
2021 		return PTR_ERR(fid);
2022 	}
2023 
2024 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2025 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2026 		err = -EINVAL;
2027 		goto err_vni_exists;
2028 	}
2029 
2030 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2031 	if (err)
2032 		goto err_nve_fid_enable;
2033 
2034 	return 0;
2035 
2036 err_nve_fid_enable:
2037 err_vni_exists:
2038 	mlxsw_sp_fid_put(fid);
2039 	return err;
2040 }
2041 
2042 static int
2043 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2044 				 const struct net_device *vxlan_dev, u16 vid,
2045 				 struct netlink_ext_ack *extack)
2046 {
2047 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2048 						     vid, ETH_P_8021Q, extack);
2049 }
2050 
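/* Find a VxLAN device enslaved to the bridge whose mapped VLAN (PVID and
 * egress untagged on its bridge port) is 'vid', if one exists.
 */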
2051 static struct net_device *
2052 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2053 {
2054 	struct net_device *dev;
2055 	struct list_head *iter;
2056 
2057 	netdev_for_each_lower_dev(br_dev, dev, iter) {
2058 		u16 pvid;
2059 		int err;
2060 
2061 		if (!netif_is_vxlan(dev))
2062 			continue;
2063 
2064 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2065 		if (err || pvid != vid)
2066 			continue;
2067 
2068 		return dev;
2069 	}
2070 
2071 	return NULL;
2072 }
2073 
2074 static struct mlxsw_sp_fid *
2075 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2076 			      u16 vid, struct netlink_ext_ack *extack)
2077 {
2078 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2079 
2080 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2081 }
2082 
2083 static struct mlxsw_sp_fid *
2084 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2085 				 u16 vid)
2086 {
2087 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2088 
2089 	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2090 }
2091 
2092 static u16
2093 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2094 			      const struct mlxsw_sp_fid *fid)
2095 {
2096 	return mlxsw_sp_fid_8021q_vid(fid);
2097 }
2098 
2099 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2100 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
2101 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
2102 	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
2103 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2104 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2105 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2106 };
2107 
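/* Check whether one of the port's {Port, VID} entries is already enslaved to
 * the given bridge device.
 */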
2108 static bool
2109 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2110 			   const struct net_device *br_dev)
2111 {
2112 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2113 
2114 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2115 			    list) {
2116 		if (mlxsw_sp_port_vlan->bridge_port &&
2117 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2118 		    br_dev)
2119 			return true;
2120 	}
2121 
2122 	return false;
2123 }
2124 
2125 static int
2126 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2127 				struct mlxsw_sp_bridge_port *bridge_port,
2128 				struct mlxsw_sp_port *mlxsw_sp_port,
2129 				struct netlink_ext_ack *extack)
2130 {
2131 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2132 	struct net_device *dev = bridge_port->dev;
2133 	u16 vid;
2134 
2135 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2136 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2137 	if (WARN_ON(!mlxsw_sp_port_vlan))
2138 		return -EINVAL;
2139 
2140 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2141 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2142 		return -EINVAL;
2143 	}
2144 
2145 	/* Port is no longer usable as a router interface */
2146 	if (mlxsw_sp_port_vlan->fid)
2147 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2148 
2149 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
2150 					      extack);
2151 }
2152 
2153 static void
2154 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2155 				 struct mlxsw_sp_bridge_port *bridge_port,
2156 				 struct mlxsw_sp_port *mlxsw_sp_port)
2157 {
2158 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2159 	struct net_device *dev = bridge_port->dev;
2160 	u16 vid;
2161 
2162 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2163 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2164 	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2165 		return;
2166 
2167 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2168 }
2169 
2170 static int
2171 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2172 				 const struct net_device *vxlan_dev, u16 vid,
2173 				 struct netlink_ext_ack *extack)
2174 {
2175 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2176 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2177 	struct mlxsw_sp_nve_params params = {
2178 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2179 		.vni = vxlan->cfg.vni,
2180 		.dev = vxlan_dev,
2181 		.ethertype = ETH_P_8021Q,
2182 	};
2183 	struct mlxsw_sp_fid *fid;
2184 	int err;
2185 
2186 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2187 	if (IS_ERR(fid)) {
2188 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2189 		return -EINVAL;
2190 	}
2191 
2192 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2193 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2194 		err = -EINVAL;
2195 		goto err_vni_exists;
2196 	}
2197 
2198 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2199 	if (err)
2200 		goto err_nve_fid_enable;
2201 
2202 	return 0;
2203 
2204 err_nve_fid_enable:
2205 err_vni_exists:
2206 	mlxsw_sp_fid_put(fid);
2207 	return err;
2208 }
2209 
2210 static struct mlxsw_sp_fid *
2211 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2212 			      u16 vid, struct netlink_ext_ack *extack)
2213 {
2214 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2215 
2216 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2217 }
2218 
2219 static struct mlxsw_sp_fid *
2220 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2221 				 u16 vid)
2222 {
2223 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2224 
2225 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2226 	if (vid)
2227 		return NULL;
2228 
2229 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2230 }
2231 
2232 static u16
2233 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2234 			      const struct mlxsw_sp_fid *fid)
2235 {
2236 	return 0;
2237 }
2238 
2239 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2240 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2241 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2242 	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
2243 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2244 	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
2245 	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
2246 };
2247 
2248 static int
2249 mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2250 				 struct mlxsw_sp_bridge_port *bridge_port,
2251 				 struct mlxsw_sp_port *mlxsw_sp_port,
2252 				 struct netlink_ext_ack *extack)
2253 {
2254 	int err;
2255 
2256 	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
2257 	if (err)
2258 		return err;
2259 
2260 	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2261 						   extack);
2262 	if (err)
2263 		goto err_bridge_vlan_aware_port_join;
2264 
2265 	return 0;
2266 
2267 err_bridge_vlan_aware_port_join:
2268 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2269 	return err;
2270 }
2271 
2272 static void
2273 mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2274 				  struct mlxsw_sp_bridge_port *bridge_port,
2275 				  struct mlxsw_sp_port *mlxsw_sp_port)
2276 {
2277 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
2278 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2279 }
2280 
2281 static int
2282 mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2283 				  const struct net_device *vxlan_dev, u16 vid,
2284 				  struct netlink_ext_ack *extack)
2285 {
2286 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2287 						     vid, ETH_P_8021AD, extack);
2288 }
2289 
2290 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
2291 	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
2292 	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
2293 	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
2294 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2295 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2296 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2297 };
2298 
2299 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2300 			      struct net_device *brport_dev,
2301 			      struct net_device *br_dev,
2302 			      struct netlink_ext_ack *extack)
2303 {
2304 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2305 	struct mlxsw_sp_bridge_device *bridge_device;
2306 	struct mlxsw_sp_bridge_port *bridge_port;
2307 	int err;
2308 
2309 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2310 					       extack);
2311 	if (IS_ERR(bridge_port))
2312 		return PTR_ERR(bridge_port);
2313 	bridge_device = bridge_port->bridge_device;
2314 
2315 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2316 					    mlxsw_sp_port, extack);
2317 	if (err)
2318 		goto err_port_join;
2319 
2320 	return 0;
2321 
2322 err_port_join:
2323 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2324 	return err;
2325 }
2326 
2327 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2328 				struct net_device *brport_dev,
2329 				struct net_device *br_dev)
2330 {
2331 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2332 	struct mlxsw_sp_bridge_device *bridge_device;
2333 	struct mlxsw_sp_bridge_port *bridge_port;
2334 
2335 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2336 	if (!bridge_device)
2337 		return;
2338 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2339 	if (!bridge_port)
2340 		return;
2341 
2342 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2343 				       mlxsw_sp_port);
2344 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2345 }
2346 
2347 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2348 			       const struct net_device *br_dev,
2349 			       const struct net_device *vxlan_dev, u16 vid,
2350 			       struct netlink_ext_ack *extack)
2351 {
2352 	struct mlxsw_sp_bridge_device *bridge_device;
2353 
2354 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2355 	if (WARN_ON(!bridge_device))
2356 		return -EINVAL;
2357 
2358 	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
2359 					      extack);
2360 }
2361 
2362 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2363 				 const struct net_device *vxlan_dev)
2364 {
2365 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2366 	struct mlxsw_sp_fid *fid;
2367 
2368 	/* If the VxLAN device is down, then the FID does not have a VNI */
2369 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2370 	if (!fid)
2371 		return;
2372 
2373 	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2374 	/* Drop both the reference we just took during lookup and the reference
2375 	 * the VXLAN device took.
2376 	 */
2377 	mlxsw_sp_fid_put(fid);
2378 	mlxsw_sp_fid_put(fid);
2379 }
2380 
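/* Convert between the VxLAN driver's address representation and the driver's
 * internal L3 protocol / address pair.
 */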
2381 static void
2382 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2383 				      enum mlxsw_sp_l3proto *proto,
2384 				      union mlxsw_sp_l3addr *addr)
2385 {
2386 	if (vxlan_addr->sa.sa_family == AF_INET) {
2387 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2388 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2389 	} else {
2390 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2391 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2392 	}
2393 }
2394 
2395 static void
2396 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2397 				      const union mlxsw_sp_l3addr *addr,
2398 				      union vxlan_addr *vxlan_addr)
2399 {
2400 	switch (proto) {
2401 	case MLXSW_SP_L3_PROTO_IPV4:
2402 		vxlan_addr->sa.sa_family = AF_INET;
2403 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2404 		break;
2405 	case MLXSW_SP_L3_PROTO_IPV6:
2406 		vxlan_addr->sa.sa_family = AF_INET6;
2407 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2408 		break;
2409 	}
2410 }
2411 
2412 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2413 					      const char *mac,
2414 					      enum mlxsw_sp_l3proto proto,
2415 					      union mlxsw_sp_l3addr *addr,
2416 					      __be32 vni, bool adding)
2417 {
2418 	struct switchdev_notifier_vxlan_fdb_info info;
2419 	struct vxlan_dev *vxlan = netdev_priv(dev);
2420 	enum switchdev_notifier_type type;
2421 
2422 	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2423 			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2424 	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2425 	info.remote_port = vxlan->cfg.dst_port;
2426 	info.remote_vni = vni;
2427 	info.remote_ifindex = 0;
2428 	ether_addr_copy(info.eth_addr, mac);
2429 	info.vni = vni;
2430 	info.offloaded = adding;
2431 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2432 }
2433 
2434 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2435 					    const char *mac,
2436 					    enum mlxsw_sp_l3proto proto,
2437 					    union mlxsw_sp_l3addr *addr,
2438 					    __be32 vni,
2439 					    bool adding)
2440 {
2441 	if (netif_is_vxlan(dev))
2442 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2443 						  adding);
2444 }
2445 
2446 static void
2447 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2448 			    const char *mac, u16 vid,
2449 			    struct net_device *dev, bool offloaded)
2450 {
2451 	struct switchdev_notifier_fdb_info info;
2452 
2453 	info.addr = mac;
2454 	info.vid = vid;
2455 	info.offloaded = offloaded;
2456 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2457 }
2458 
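/* Process a learned / aged-out MAC record reported by the device: update the
 * FDB entry in hardware and notify the bridge. If the record cannot be
 * associated with an offloaded {Port, VID}, the entry is removed from the
 * device without notifying the bridge.
 */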
2459 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2460 					    char *sfn_pl, int rec_index,
2461 					    bool adding)
2462 {
2463 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2464 	struct mlxsw_sp_bridge_device *bridge_device;
2465 	struct mlxsw_sp_bridge_port *bridge_port;
2466 	struct mlxsw_sp_port *mlxsw_sp_port;
2467 	enum switchdev_notifier_type type;
2468 	char mac[ETH_ALEN];
2469 	u8 local_port;
2470 	u16 vid, fid;
2471 	bool do_notification = true;
2472 	int err;
2473 
2474 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2475 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2476 	if (!mlxsw_sp_port) {
2477 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2478 		goto just_remove;
2479 	}
2480 
2481 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2482 		goto just_remove;
2483 
2484 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2485 	if (!mlxsw_sp_port_vlan) {
2486 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2487 		goto just_remove;
2488 	}
2489 
2490 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2491 	if (!bridge_port) {
2492 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2493 		goto just_remove;
2494 	}
2495 
2496 	bridge_device = bridge_port->bridge_device;
2497 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2498 
2499 do_fdb_op:
2500 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2501 				      adding, true);
2502 	if (err) {
2503 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2504 		return;
2505 	}
2506 
2507 	if (!do_notification)
2508 		return;
2509 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2510 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2511 
2512 	return;
2513 
2514 just_remove:
2515 	adding = false;
2516 	do_notification = false;
2517 	goto do_fdb_op;
2518 }
2519 
2520 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2521 						char *sfn_pl, int rec_index,
2522 						bool adding)
2523 {
2524 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2525 	struct mlxsw_sp_bridge_device *bridge_device;
2526 	struct mlxsw_sp_bridge_port *bridge_port;
2527 	struct mlxsw_sp_port *mlxsw_sp_port;
2528 	enum switchdev_notifier_type type;
2529 	char mac[ETH_ALEN];
2530 	u16 lag_vid = 0;
2531 	u16 lag_id;
2532 	u16 vid, fid;
2533 	bool do_notification = true;
2534 	int err;
2535 
2536 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2537 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2538 	if (!mlxsw_sp_port) {
2539 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2540 		goto just_remove;
2541 	}
2542 
2543 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2544 		goto just_remove;
2545 
2546 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2547 	if (!mlxsw_sp_port_vlan) {
2548 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2549 		goto just_remove;
2550 	}
2551 
2552 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2553 	if (!bridge_port) {
2554 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2555 		goto just_remove;
2556 	}
2557 
2558 	bridge_device = bridge_port->bridge_device;
2559 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2560 	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2561 		  mlxsw_sp_port_vlan->vid : 0;
2562 
2563 do_fdb_op:
2564 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2565 					  adding, true);
2566 	if (err) {
2567 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2568 		return;
2569 	}
2570 
2571 	if (!do_notification)
2572 		return;
2573 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2574 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2575 
2576 	return;
2577 
2578 just_remove:
2579 	adding = false;
2580 	do_notification = false;
2581 	goto do_fdb_op;
2582 }
2583 
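/* Resolve the NVE device, VLAN and VNI for a unicast tunnel FDB record and
 * reject records that should not be reflected to the bridge (e.g., when
 * learning is disabled on the bridge port or the VxLAN device).
 */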
2584 static int
2585 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2586 					    const struct mlxsw_sp_fid *fid,
2587 					    bool adding,
2588 					    struct net_device **nve_dev,
2589 					    u16 *p_vid, __be32 *p_vni)
2590 {
2591 	struct mlxsw_sp_bridge_device *bridge_device;
2592 	struct net_device *br_dev, *dev;
2593 	int nve_ifindex;
2594 	int err;
2595 
2596 	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2597 	if (err)
2598 		return err;
2599 
2600 	err = mlxsw_sp_fid_vni(fid, p_vni);
2601 	if (err)
2602 		return err;
2603 
2604 	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
2605 	if (!dev)
2606 		return -EINVAL;
2607 	*nve_dev = dev;
2608 
2609 	if (!netif_running(dev))
2610 		return -EINVAL;
2611 
2612 	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2613 		return -EINVAL;
2614 
2615 	if (adding && netif_is_vxlan(dev)) {
2616 		struct vxlan_dev *vxlan = netdev_priv(dev);
2617 
2618 		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2619 			return -EINVAL;
2620 	}
2621 
2622 	br_dev = netdev_master_upper_dev_get(dev);
2623 	if (!br_dev)
2624 		return -EINVAL;
2625 
2626 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2627 	if (!bridge_device)
2628 		return -EINVAL;
2629 
2630 	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
2631 
2632 	return 0;
2633 }
2634 
2635 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2636 						      char *sfn_pl,
2637 						      int rec_index,
2638 						      bool adding)
2639 {
2640 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2641 	enum switchdev_notifier_type type;
2642 	struct net_device *nve_dev;
2643 	union mlxsw_sp_l3addr addr;
2644 	struct mlxsw_sp_fid *fid;
2645 	char mac[ETH_ALEN];
2646 	u16 fid_index, vid;
2647 	__be32 vni;
2648 	u32 uip;
2649 	int err;
2650 
2651 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2652 				       &uip, &sfn_proto);
2653 
2654 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2655 	if (!fid)
2656 		goto err_fid_lookup;
2657 
2658 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2659 					      (enum mlxsw_sp_l3proto) sfn_proto,
2660 					      &addr);
2661 	if (err)
2662 		goto err_ip_resolve;
2663 
2664 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2665 							  &nve_dev, &vid, &vni);
2666 	if (err)
2667 		goto err_fdb_process;
2668 
2669 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2670 					     (enum mlxsw_sp_l3proto) sfn_proto,
2671 					     &addr, adding, true);
2672 	if (err)
2673 		goto err_fdb_op;
2674 
2675 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2676 					(enum mlxsw_sp_l3proto) sfn_proto,
2677 					&addr, vni, adding);
2678 
2679 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2680 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2681 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2682 
2683 	mlxsw_sp_fid_put(fid);
2684 
2685 	return;
2686 
2687 err_fdb_op:
2688 err_fdb_process:
2689 err_ip_resolve:
2690 	mlxsw_sp_fid_put(fid);
2691 err_fid_lookup:
2692 	/* Remove an FDB entry in case we cannot process it. Otherwise the
2693 	 * device will keep sending the same notification over and over again.
2694 	 */
2695 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2696 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2697 				       false, true);
2698 }
2699 
2700 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2701 					    char *sfn_pl, int rec_index)
2702 {
2703 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2704 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2705 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2706 						rec_index, true);
2707 		break;
2708 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2709 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2710 						rec_index, false);
2711 		break;
2712 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2713 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2714 						    rec_index, true);
2715 		break;
2716 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2717 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2718 						    rec_index, false);
2719 		break;
2720 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2721 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2722 							  rec_index, true);
2723 		break;
2724 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2725 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2726 							  rec_index, false);
2727 		break;
2728 	}
2729 }
2730 
2731 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
2732 					      bool no_delay)
2733 {
2734 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2735 	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
2736 
2737 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2738 			       msecs_to_jiffies(interval));
2739 }
2740 
2741 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
2742 
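/* Poll the device for learned / aged-out FDB records and process them. At
 * most MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION queries are performed per
 * invocation; the work is then rescheduled, immediately if more records
 * might still be pending.
 */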
2743 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2744 {
2745 	struct mlxsw_sp_bridge *bridge;
2746 	struct mlxsw_sp *mlxsw_sp;
2747 	char *sfn_pl;
2748 	int queries;
2749 	u8 num_rec;
2750 	int i;
2751 	int err;
2752 
2753 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2754 	if (!sfn_pl)
2755 		return;
2756 
2757 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2758 	mlxsw_sp = bridge->mlxsw_sp;
2759 
2760 	rtnl_lock();
2761 	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
2762 	while (queries > 0) {
2763 		mlxsw_reg_sfn_pack(sfn_pl);
2764 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2765 		if (err) {
2766 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2767 			goto out;
2768 		}
2769 		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2770 		for (i = 0; i < num_rec; i++)
2771 			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2772 		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
2773 			goto out;
2774 		queries--;
2775 	}
2776 
2777 out:
2778 	rtnl_unlock();
2779 	kfree(sfn_pl);
2780 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
2781 }
2782 
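/* Deferred work for switchdev FDB notifications. The notifier is called in
 * an atomic context, so the actual processing is done from a work item.
 */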
2783 struct mlxsw_sp_switchdev_event_work {
2784 	struct work_struct work;
2785 	union {
2786 		struct switchdev_notifier_fdb_info fdb_info;
2787 		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2788 	};
2789 	struct net_device *dev;
2790 	unsigned long event;
2791 };
2792 
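/* An FDB entry pointing to a VxLAN bridge port was added to / deleted from
 * the bridge. Look up the corresponding entry in the VxLAN device's FDB and
 * program / unprogram it in the device, marking the entries as offloaded
 * accordingly.
 */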
2793 static void
2794 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2795 					  struct mlxsw_sp_switchdev_event_work *
2796 					  switchdev_work,
2797 					  struct mlxsw_sp_fid *fid, __be32 vni)
2798 {
2799 	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2800 	struct switchdev_notifier_fdb_info *fdb_info;
2801 	struct net_device *dev = switchdev_work->dev;
2802 	enum mlxsw_sp_l3proto proto;
2803 	union mlxsw_sp_l3addr addr;
2804 	int err;
2805 
2806 	fdb_info = &switchdev_work->fdb_info;
2807 	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2808 	if (err)
2809 		return;
2810 
2811 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2812 					      &proto, &addr);
2813 
2814 	switch (switchdev_work->event) {
2815 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2816 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2817 						     vxlan_fdb_info.eth_addr,
2818 						     mlxsw_sp_fid_index(fid),
2819 						     proto, &addr, true, false);
2820 		if (err)
2821 			return;
2822 		vxlan_fdb_info.offloaded = true;
2823 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2824 					 &vxlan_fdb_info.info, NULL);
2825 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2826 					    vxlan_fdb_info.eth_addr,
2827 					    fdb_info->vid, dev, true);
2828 		break;
2829 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2830 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2831 						     vxlan_fdb_info.eth_addr,
2832 						     mlxsw_sp_fid_index(fid),
2833 						     proto, &addr, false,
2834 						     false);
2835 		vxlan_fdb_info.offloaded = false;
2836 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2837 					 &vxlan_fdb_info.info, NULL);
2838 		break;
2839 	}
2840 }
2841 
2842 static void
2843 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2844 					switchdev_work)
2845 {
2846 	struct mlxsw_sp_bridge_device *bridge_device;
2847 	struct net_device *dev = switchdev_work->dev;
2848 	struct net_device *br_dev;
2849 	struct mlxsw_sp *mlxsw_sp;
2850 	struct mlxsw_sp_fid *fid;
2851 	__be32 vni;
2852 	int err;
2853 
2854 	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2855 	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
2856 		return;
2857 
2858 	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2859 	    !switchdev_work->fdb_info.added_by_user)
2860 		return;
2861 
2862 	if (!netif_running(dev))
2863 		return;
2864 	br_dev = netdev_master_upper_dev_get(dev);
2865 	if (!br_dev)
2866 		return;
2867 	if (!netif_is_bridge_master(br_dev))
2868 		return;
2869 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2870 	if (!mlxsw_sp)
2871 		return;
2872 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2873 	if (!bridge_device)
2874 		return;
2875 
2876 	fid = bridge_device->ops->fid_lookup(bridge_device,
2877 					     switchdev_work->fdb_info.vid);
2878 	if (!fid)
2879 		return;
2880 
2881 	err = mlxsw_sp_fid_vni(fid, &vni);
2882 	if (err)
2883 		goto out;
2884 
2885 	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2886 						  vni);
2887 
2888 out:
2889 	mlxsw_sp_fid_put(fid);
2890 }
2891 
2892 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2893 {
2894 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2895 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2896 	struct net_device *dev = switchdev_work->dev;
2897 	struct switchdev_notifier_fdb_info *fdb_info;
2898 	struct mlxsw_sp_port *mlxsw_sp_port;
2899 	int err;
2900 
2901 	rtnl_lock();
2902 	if (netif_is_vxlan(dev)) {
2903 		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2904 		goto out;
2905 	}
2906 
2907 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2908 	if (!mlxsw_sp_port)
2909 		goto out;
2910 
2911 	switch (switchdev_work->event) {
2912 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2913 		fdb_info = &switchdev_work->fdb_info;
2914 		if (!fdb_info->added_by_user)
2915 			break;
2916 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2917 		if (err)
2918 			break;
2919 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2920 					    fdb_info->addr,
2921 					    fdb_info->vid, dev, true);
2922 		break;
2923 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2924 		fdb_info = &switchdev_work->fdb_info;
2925 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2926 		break;
2927 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
2928 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2929 		/* These events are only used to potentially update an existing
2930 		 * SPAN mirror.
2931 		 */
2932 		break;
2933 	}
2934 
2935 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2936 
2937 out:
2938 	rtnl_unlock();
2939 	kfree(switchdev_work->fdb_info.addr);
2940 	kfree(switchdev_work);
2941 	dev_put(dev);
2942 }
2943 
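/* An FDB entry was added to the VxLAN device. An all-zeros MAC denotes the
 * default destination and is programmed as a flood IP on the FID; other
 * entries are programmed as unicast tunnel FDB entries, but only if the
 * bridge's FDB also points the MAC at the VxLAN device.
 */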
2944 static void
2945 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2946 				 struct mlxsw_sp_switchdev_event_work *
2947 				 switchdev_work)
2948 {
2949 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2950 	struct mlxsw_sp_bridge_device *bridge_device;
2951 	struct net_device *dev = switchdev_work->dev;
2952 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
2953 	enum mlxsw_sp_l3proto proto;
2954 	union mlxsw_sp_l3addr addr;
2955 	struct net_device *br_dev;
2956 	struct mlxsw_sp_fid *fid;
2957 	u16 vid;
2958 	int err;
2959 
2960 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2961 	br_dev = netdev_master_upper_dev_get(dev);
2962 
2963 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2964 	if (!bridge_device)
2965 		return;
2966 
2967 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2968 	if (!fid)
2969 		return;
2970 
2971 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
2972 					      &proto, &addr);
2973 
2974 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2975 		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2976 		if (err) {
2977 			mlxsw_sp_fid_put(fid);
2978 			return;
2979 		}
2980 		vxlan_fdb_info->offloaded = true;
2981 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2982 					 &vxlan_fdb_info->info, NULL);
2983 		mlxsw_sp_fid_put(fid);
2984 		return;
2985 	}
2986 
2987 	/* The device has a single FDB table, whereas Linux has two - one
2988 	 * in the bridge driver and another in the VxLAN driver. We only
2989 	 * program an entry into the device if the MAC points to the VxLAN
2990 	 * device in the bridge's FDB table.
2991 	 */
2992 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
2993 	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
2994 		goto err_br_fdb_find;
2995 
2996 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2997 					     mlxsw_sp_fid_index(fid), proto,
2998 					     &addr, true, false);
2999 	if (err)
3000 		goto err_fdb_tunnel_uc_op;
3001 	vxlan_fdb_info->offloaded = true;
3002 	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3003 				 &vxlan_fdb_info->info, NULL);
3004 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3005 				    vxlan_fdb_info->eth_addr, vid, dev, true);
3006 
3007 	mlxsw_sp_fid_put(fid);
3008 
3009 	return;
3010 
3011 err_fdb_tunnel_uc_op:
3012 err_br_fdb_find:
3013 	mlxsw_sp_fid_put(fid);
3014 }
3015 
3016 static void
3017 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3018 				 struct mlxsw_sp_switchdev_event_work *
3019 				 switchdev_work)
3020 {
3021 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3022 	struct mlxsw_sp_bridge_device *bridge_device;
3023 	struct net_device *dev = switchdev_work->dev;
3024 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3025 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3026 	enum mlxsw_sp_l3proto proto;
3027 	union mlxsw_sp_l3addr addr;
3028 	struct mlxsw_sp_fid *fid;
3029 	u16 vid;
3030 
3031 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3032 
3033 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3034 	if (!bridge_device)
3035 		return;
3036 
3037 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3038 	if (!fid)
3039 		return;
3040 
3041 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3042 					      &proto, &addr);
3043 
3044 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3045 		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3046 		mlxsw_sp_fid_put(fid);
3047 		return;
3048 	}
3049 
3050 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3051 				       mlxsw_sp_fid_index(fid), proto, &addr,
3052 				       false, false);
3053 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3054 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3055 				    vxlan_fdb_info->eth_addr, vid, dev, false);
3056 
3057 	mlxsw_sp_fid_put(fid);
3058 }
3059 
3060 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3061 {
3062 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3063 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3064 	struct net_device *dev = switchdev_work->dev;
3065 	struct mlxsw_sp *mlxsw_sp;
3066 	struct net_device *br_dev;
3067 
3068 	rtnl_lock();
3069 
3070 	if (!netif_running(dev))
3071 		goto out;
3072 	br_dev = netdev_master_upper_dev_get(dev);
3073 	if (!br_dev)
3074 		goto out;
3075 	if (!netif_is_bridge_master(br_dev))
3076 		goto out;
3077 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3078 	if (!mlxsw_sp)
3079 		goto out;
3080 
3081 	switch (switchdev_work->event) {
3082 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3083 		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3084 		break;
3085 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3086 		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3087 		break;
3088 	}
3089 
3090 out:
3091 	rtnl_unlock();
3092 	kfree(switchdev_work);
3093 	dev_put(dev);
3094 }
3095 
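/* Validate that the VxLAN FDB entry only uses parameters that can be
 * offloaded (default destination port, matching VNI, no local interface,
 * unicast MAC and unicast remote IP) and copy it into the work item.
 */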
3096 static int
3097 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3098 				      switchdev_work,
3099 				      struct switchdev_notifier_info *info)
3100 {
3101 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3102 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3103 	struct vxlan_config *cfg = &vxlan->cfg;
3104 	struct netlink_ext_ack *extack;
3105 
3106 	extack = switchdev_notifier_info_to_extack(info);
3107 	vxlan_fdb_info = container_of(info,
3108 				      struct switchdev_notifier_vxlan_fdb_info,
3109 				      info);
3110 
3111 	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3112 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3113 		return -EOPNOTSUPP;
3114 	}
3115 	if (vxlan_fdb_info->remote_vni != cfg->vni ||
3116 	    vxlan_fdb_info->vni != cfg->vni) {
3117 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3118 		return -EOPNOTSUPP;
3119 	}
3120 	if (vxlan_fdb_info->remote_ifindex) {
3121 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3122 		return -EOPNOTSUPP;
3123 	}
3124 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3125 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3126 		return -EOPNOTSUPP;
3127 	}
3128 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3129 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3130 		return -EOPNOTSUPP;
3131 	}
3132 
3133 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3134 
3135 	return 0;
3136 }
3137 
3138 /* Called under rcu_read_lock() */
3139 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3140 				    unsigned long event, void *ptr)
3141 {
3142 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3143 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
3144 	struct switchdev_notifier_fdb_info *fdb_info;
3145 	struct switchdev_notifier_info *info = ptr;
3146 	struct net_device *br_dev;
3147 	int err;
3148 
3149 	if (event == SWITCHDEV_PORT_ATTR_SET) {
3150 		err = switchdev_handle_port_attr_set(dev, ptr,
3151 						     mlxsw_sp_port_dev_check,
3152 						     mlxsw_sp_port_attr_set);
3153 		return notifier_from_errno(err);
3154 	}
3155 
3156 	/* Tunnel devices are not our uppers, so check their master instead */
3157 	br_dev = netdev_master_upper_dev_get_rcu(dev);
3158 	if (!br_dev)
3159 		return NOTIFY_DONE;
3160 	if (!netif_is_bridge_master(br_dev))
3161 		return NOTIFY_DONE;
3162 	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3163 		return NOTIFY_DONE;
3164 
3165 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3166 	if (!switchdev_work)
3167 		return NOTIFY_BAD;
3168 
3169 	switchdev_work->dev = dev;
3170 	switchdev_work->event = event;
3171 
3172 	switch (event) {
3173 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3174 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3175 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3176 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3177 		fdb_info = container_of(info,
3178 					struct switchdev_notifier_fdb_info,
3179 					info);
3180 		INIT_WORK(&switchdev_work->work,
3181 			  mlxsw_sp_switchdev_bridge_fdb_event_work);
3182 		memcpy(&switchdev_work->fdb_info, ptr,
3183 		       sizeof(switchdev_work->fdb_info));
3184 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3185 		if (!switchdev_work->fdb_info.addr)
3186 			goto err_addr_alloc;
3187 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3188 				fdb_info->addr);
3189 		/* Take a reference on the device. This can be either
3190 		 * an upper device containing a mlxsw_sp_port or the
3191 		 * mlxsw_sp_port itself.
3192 		 */
3193 		dev_hold(dev);
3194 		break;
3195 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3196 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3197 		INIT_WORK(&switchdev_work->work,
3198 			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
3199 		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3200 							    info);
3201 		if (err)
3202 			goto err_vxlan_work_prepare;
3203 		dev_hold(dev);
3204 		break;
3205 	default:
3206 		kfree(switchdev_work);
3207 		return NOTIFY_DONE;
3208 	}
3209 
3210 	mlxsw_core_schedule_work(&switchdev_work->work);
3211 
3212 	return NOTIFY_DONE;
3213 
3214 err_vxlan_work_prepare:
3215 err_addr_alloc:
3216 	kfree(switchdev_work);
3217 	return NOTIFY_BAD;
3218 }
3219 
3220 struct notifier_block mlxsw_sp_switchdev_notifier = {
3221 	.notifier_call = mlxsw_sp_switchdev_event,
3222 };
3223 
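/* A VLAN was added on the bridge port of a VxLAN device enslaved to a
 * VLAN-aware bridge. Depending on the PVID and egress untagged flags this
 * can map the VLAN to the VNI or unmap the VLAN that is currently mapped;
 * the individual cases are handled below.
 */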
3224 static int
3225 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3226 				  struct mlxsw_sp_bridge_device *bridge_device,
3227 				  const struct net_device *vxlan_dev, u16 vid,
3228 				  bool flag_untagged, bool flag_pvid,
3229 				  struct netlink_ext_ack *extack)
3230 {
3231 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3232 	__be32 vni = vxlan->cfg.vni;
3233 	struct mlxsw_sp_fid *fid;
3234 	u16 old_vid;
3235 	int err;
3236 
3237 	/* We cannot have the same VLAN as PVID and egress untagged on multiple
3238 	 * VxLAN devices. Note that we get this notification before the VLAN is
3239 	 * actually added to the bridge's database, so it is not possible for
3240 	 * the lookup function to return 'vxlan_dev'.
3241 	 */
3242 	if (flag_untagged && flag_pvid &&
3243 	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3244 		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3245 		return -EINVAL;
3246 	}
3247 
3248 	if (!netif_running(vxlan_dev))
3249 		return 0;
3250 
3251 	/* First case: FID is not associated with this VNI, but the new VLAN
3252 	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3253 	 * it exists.
3254 	 */
3255 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3256 	if (!fid) {
3257 		if (!flag_untagged || !flag_pvid)
3258 			return 0;
3259 		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
3260 						      vid, extack);
3261 	}
3262 
3263 	/* Second case: FID is associated with the VNI and the VLAN associated
3264 	 * with the FID is the same as the notified VLAN. This means the flags
3265 	 * (PVID / egress untagged) were toggled and that NVE should be
3266 	 * disabled on the FID.
3267 	 */
3268 	old_vid = mlxsw_sp_fid_8021q_vid(fid);
3269 	if (vid == old_vid) {
3270 		if (WARN_ON(flag_untagged && flag_pvid)) {
3271 			mlxsw_sp_fid_put(fid);
3272 			return -EINVAL;
3273 		}
3274 		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3275 		mlxsw_sp_fid_put(fid);
3276 		return 0;
3277 	}
3278 
3279 	/* Third case: A new VLAN was configured on the VxLAN device, but this
3280 	 * VLAN is not PVID, so there is nothing to do.
3281 	 */
3282 	if (!flag_pvid) {
3283 		mlxsw_sp_fid_put(fid);
3284 		return 0;
3285 	}
3286 
3287 	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
3288 	 * mapped to the VNI should be unmapped.
3289 	 */
3290 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3291 	mlxsw_sp_fid_put(fid);
3292 
3293 	/* Fifth case: The new VLAN is also egress untagged, which means the
3294 	 * VLAN needs to be mapped to the VNI
3295 	 */
3296 	if (!flag_untagged)
3297 		return 0;
3298 
3299 	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
3300 	if (err)
3301 		goto err_vxlan_join;
3302 
3303 	return 0;
3304 
3305 err_vxlan_join:
3306 	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
3307 	return err;
3308 }
3309 
3310 static void
3311 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3312 				  struct mlxsw_sp_bridge_device *bridge_device,
3313 				  const struct net_device *vxlan_dev, u16 vid)
3314 {
3315 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3316 	__be32 vni = vxlan->cfg.vni;
3317 	struct mlxsw_sp_fid *fid;
3318 
3319 	if (!netif_running(vxlan_dev))
3320 		return;
3321 
3322 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3323 	if (!fid)
3324 		return;
3325 
3326 	/* A different VLAN than the one mapped to the VNI is deleted */
3327 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3328 		goto out;
3329 
3330 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3331 
3332 out:
3333 	mlxsw_sp_fid_put(fid);
3334 }
3335 
3336 static int
3337 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3338 				   struct switchdev_notifier_port_obj_info *
3339 				   port_obj_info)
3340 {
3341 	struct switchdev_obj_port_vlan *vlan =
3342 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3343 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3344 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3345 	struct mlxsw_sp_bridge_device *bridge_device;
3346 	struct netlink_ext_ack *extack;
3347 	struct mlxsw_sp *mlxsw_sp;
3348 	struct net_device *br_dev;
3349 
3350 	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3351 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3352 	if (!br_dev)
3353 		return 0;
3354 
3355 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3356 	if (!mlxsw_sp)
3357 		return 0;
3358 
3359 	port_obj_info->handled = true;
3360 
3361 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3362 	if (!bridge_device)
3363 		return -EINVAL;
3364 
3365 	if (!bridge_device->vlan_enabled)
3366 		return 0;
3367 
3368 	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3369 						 vxlan_dev, vlan->vid,
3370 						 flag_untagged,
3371 						 flag_pvid, extack);
3372 }
3373 
3374 static void
3375 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3376 				   struct switchdev_notifier_port_obj_info *
3377 				   port_obj_info)
3378 {
3379 	struct switchdev_obj_port_vlan *vlan =
3380 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3381 	struct mlxsw_sp_bridge_device *bridge_device;
3382 	struct mlxsw_sp *mlxsw_sp;
3383 	struct net_device *br_dev;
3384 
3385 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3386 	if (!br_dev)
3387 		return;
3388 
3389 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3390 	if (!mlxsw_sp)
3391 		return;
3392 
3393 	port_obj_info->handled = true;
3394 
3395 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3396 	if (!bridge_device)
3397 		return;
3398 
3399 	if (!bridge_device->vlan_enabled)
3400 		return;
3401 
3402 	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3403 					  vlan->vid);
3404 }
3405 
3406 static int
3407 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3408 					struct switchdev_notifier_port_obj_info *
3409 					port_obj_info)
3410 {
3411 	int err = 0;
3412 
3413 	switch (port_obj_info->obj->id) {
3414 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3415 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3416 							 port_obj_info);
3417 		break;
3418 	default:
3419 		break;
3420 	}
3421 
3422 	return err;
3423 }
3424 
3425 static void
3426 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3427 					struct switchdev_notifier_port_obj_info *
3428 					port_obj_info)
3429 {
3430 	switch (port_obj_info->obj->id) {
3431 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3432 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3433 		break;
3434 	default:
3435 		break;
3436 	}
3437 }
3438 
3439 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3440 					     unsigned long event, void *ptr)
3441 {
3442 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3443 	int err = 0;
3444 
3445 	switch (event) {
3446 	case SWITCHDEV_PORT_OBJ_ADD:
3447 		if (netif_is_vxlan(dev))
3448 			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3449 		else
3450 			err = switchdev_handle_port_obj_add(dev, ptr,
3451 							mlxsw_sp_port_dev_check,
3452 							mlxsw_sp_port_obj_add);
3453 		return notifier_from_errno(err);
3454 	case SWITCHDEV_PORT_OBJ_DEL:
3455 		if (netif_is_vxlan(dev))
3456 			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3457 		else
3458 			err = switchdev_handle_port_obj_del(dev, ptr,
3459 							mlxsw_sp_port_dev_check,
3460 							mlxsw_sp_port_obj_del);
3461 		return notifier_from_errno(err);
3462 	case SWITCHDEV_PORT_ATTR_SET:
3463 		err = switchdev_handle_port_attr_set(dev, ptr,
3464 						     mlxsw_sp_port_dev_check,
3465 						     mlxsw_sp_port_attr_set);
3466 		return notifier_from_errno(err);
3467 	}
3468 
3469 	return NOTIFY_DONE;
3470 }
3471 
3472 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3473 	.notifier_call = mlxsw_sp_switchdev_blocking_event,
3474 };
3475 
3476 u8
3477 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3478 {
3479 	return bridge_port->stp_state;
3480 }
3481 
3482 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3483 {
3484 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3485 	struct notifier_block *nb;
3486 	int err;
3487 
3488 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3489 	if (err) {
3490 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3491 		return err;
3492 	}
3493 
3494 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3495 	if (err) {
3496 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3497 		return err;
3498 	}
3499 
3500 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3501 	err = register_switchdev_blocking_notifier(nb);
3502 	if (err) {
3503 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3504 		goto err_register_switchdev_blocking_notifier;
3505 	}
3506 
3507 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3508 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3509 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
3510 	return 0;
3511 
3512 err_register_switchdev_blocking_notifier:
3513 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3514 	return err;
3515 }
3516 
3517 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3518 {
3519 	struct notifier_block *nb;
3520 
3521 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3522 
3523 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3524 	unregister_switchdev_blocking_notifier(nb);
3525 
3526 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3527 }
3528 
3529 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3530 {
3531 	struct mlxsw_sp_bridge *bridge;
3532 
3533 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3534 	if (!bridge)
3535 		return -ENOMEM;
3536 	mlxsw_sp->bridge = bridge;
3537 	bridge->mlxsw_sp = mlxsw_sp;
3538 
3539 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3540 
3541 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3542 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3543 	bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops;
3544 
3545 	return mlxsw_sp_fdb_init(mlxsw_sp);
3546 }
3547 
3548 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3549 {
3550 	mlxsw_sp_fdb_fini(mlxsw_sp);
3551 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3552 	kfree(mlxsw_sp->bridge);
3553 }
3554 
3555