1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
/* Per-ASIC bridge state: tracks every offloaded bridge netdevice and the
 * FDB notification polling machinery.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		/* Delayed work for FDB notification processing. */
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only a single VLAN-aware bridge may be offloaded; see
	 * mlxsw_sp_bridge_device_create().
	 */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	/* Allocation map for multicast group (MID) indexes. */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
45 
/* An offloaded Linux bridge device; VLAN-aware (802.1Q) or VLAN-unaware
 * (802.1D) per @vlan_enabled, which also selects @ops at creation time.
 */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* The bridge netdevice. */
	struct list_head list;		/* Node in mlxsw_sp_bridge::bridges_list. */
	struct list_head ports_list;	/* Member mlxsw_sp_bridge_port objects. */
	struct list_head mids_list;	/* Multicast groups (MDB) of this bridge. */
	u8 vlan_enabled:1,		/* VLAN-aware (802.1Q) bridge. */
	   multicast_enabled:1,		/* Multicast snooping enabled. */
	   mrouter:1;			/* Bridge itself is a multicast router. */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops. */
};
56 
/* A netdevice (physical port, LAG, or VLAN upper thereof) enslaved to an
 * offloaded bridge. Reference counted via mlxsw_sp_bridge_port_get()/put().
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* The bridge port netdevice. */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* Node in bridge_device::ports_list. */
	struct list_head vlans_list;	/* mlxsw_sp_bridge_vlan entries. */
	unsigned int ref_count;
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;
	bool lagged;			/* Selects which union member is valid. */
	union {
		u16 lag_id;		/* Valid when lagged. */
		u16 system_port;	/* Valid when !lagged. */
	};
};
72 
/* A VLAN configured on a bridge port; aggregates the {port, VLAN} objects
 * sharing this VID. Destroyed when its port_vlan_list becomes empty.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* Node in bridge_port::vlans_list. */
	struct list_head port_vlan_list; /* Member mlxsw_sp_port_vlan objects. */
	u16 vid;
};
78 
/* Operations that differ between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges; selected when the bridge device is created.
 */
struct mlxsw_sp_bridge_ops {
	/* Bind a port to the bridge in the device. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	/* Undo port_join(). */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Attach/detach a VxLAN device to/from the bridge. */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev,
			  struct netlink_ext_ack *extack);
	void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *vxlan_dev);
	/* Return a referenced FID for @vid (see mlxsw_sp_port_vlan_fid_join(),
	 * which pairs it with mlxsw_sp_fid_put()).
	 */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
	/* NOTE(review): naming suggests lookup finds an existing FID without
	 * creating one - confirm against the 802.1Q/D implementations.
	 */
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	/* Map a FID back to the VID it represents on this bridge. */
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
101 
/* Forward declarations for FDB/MDB helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
120 
121 static struct mlxsw_sp_bridge_device *
122 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
123 			    const struct net_device *br_dev)
124 {
125 	struct mlxsw_sp_bridge_device *bridge_device;
126 
127 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
128 		if (bridge_device->dev == br_dev)
129 			return bridge_device;
130 
131 	return NULL;
132 }
133 
134 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
135 					 const struct net_device *br_dev)
136 {
137 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
138 }
139 
/* netdev_walk_all_upper_dev_rcu() callback; @data is the mlxsw_sp
 * instance. Destroys the RIF associated with @dev and returns 0 so the
 * walk continues over all upper devices.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
148 
/* Destroy the router interface (RIF) of @dev itself and of every upper
 * device of @dev, so no RIF outlives the bridge being torn down.
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}
157 
158 static struct mlxsw_sp_bridge_device *
159 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
160 			      struct net_device *br_dev)
161 {
162 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
163 	struct mlxsw_sp_bridge_device *bridge_device;
164 	bool vlan_enabled = br_vlan_enabled(br_dev);
165 
166 	if (vlan_enabled && bridge->vlan_enabled_exists) {
167 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
168 		return ERR_PTR(-EINVAL);
169 	}
170 
171 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
172 	if (!bridge_device)
173 		return ERR_PTR(-ENOMEM);
174 
175 	bridge_device->dev = br_dev;
176 	bridge_device->vlan_enabled = vlan_enabled;
177 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
178 	bridge_device->mrouter = br_multicast_router(br_dev);
179 	INIT_LIST_HEAD(&bridge_device->ports_list);
180 	if (vlan_enabled) {
181 		bridge->vlan_enabled_exists = true;
182 		bridge_device->ops = bridge->bridge_8021q_ops;
183 	} else {
184 		bridge_device->ops = bridge->bridge_8021d_ops;
185 	}
186 	INIT_LIST_HEAD(&bridge_device->mids_list);
187 	list_add(&bridge_device->list, &bridge->bridges_list);
188 
189 	return bridge_device;
190 }
191 
/* Tear down a bridge_device: destroy its (and its uppers') RIFs, unlink
 * it and free it. All ports and MDB entries must already be gone.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	/* Allow a new VLAN-aware bridge to be offloaded from now on. */
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
205 
/* Get-or-create the bridge_device for @br_dev. Returns ERR_PTR() when a
 * new device cannot be created.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device =
		mlxsw_sp_bridge_device_find(bridge, br_dev);

	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev);

	return bridge_device;
}
218 
/* Release a bridge_device; it is destroyed once its last port is gone.
 * The device is not reference counted - the ports list serves as the
 * liveness indicator.
 */
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}
226 
227 static struct mlxsw_sp_bridge_port *
228 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
229 			    const struct net_device *brport_dev)
230 {
231 	struct mlxsw_sp_bridge_port *bridge_port;
232 
233 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
234 		if (bridge_port->dev == brport_dev)
235 			return bridge_port;
236 	}
237 
238 	return NULL;
239 }
240 
241 struct mlxsw_sp_bridge_port *
242 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
243 			  struct net_device *brport_dev)
244 {
245 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
246 	struct mlxsw_sp_bridge_device *bridge_device;
247 
248 	if (!br_dev)
249 		return NULL;
250 
251 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
252 	if (!bridge_device)
253 		return NULL;
254 
255 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
256 }
257 
258 static struct mlxsw_sp_bridge_port *
259 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
260 			    struct net_device *brport_dev)
261 {
262 	struct mlxsw_sp_bridge_port *bridge_port;
263 	struct mlxsw_sp_port *mlxsw_sp_port;
264 
265 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
266 	if (!bridge_port)
267 		return NULL;
268 
269 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
270 	bridge_port->lagged = mlxsw_sp_port->lagged;
271 	if (bridge_port->lagged)
272 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
273 	else
274 		bridge_port->system_port = mlxsw_sp_port->local_port;
275 	bridge_port->dev = brport_dev;
276 	bridge_port->bridge_device = bridge_device;
277 	bridge_port->stp_state = BR_STATE_DISABLED;
278 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
279 			     BR_MCAST_FLOOD;
280 	INIT_LIST_HEAD(&bridge_port->vlans_list);
281 	list_add(&bridge_port->list, &bridge_device->ports_list);
282 	bridge_port->ref_count = 1;
283 
284 	return bridge_port;
285 }
286 
/* Unlink and free a bridge_port; all of its VLANs must be gone by now. */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
294 
295 static bool
296 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
297 				    bridge_port)
298 {
299 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
300 
301 	/* In case ports were pulled from out of a bridged LAG, then
302 	 * it's possible the reference count isn't zero, yet the bridge
303 	 * port should be destroyed, as it's no longer an upper of ours.
304 	 */
305 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
306 		return true;
307 	else if (bridge_port->ref_count == 0)
308 		return true;
309 	else
310 		return false;
311 }
312 
313 static struct mlxsw_sp_bridge_port *
314 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
315 			 struct net_device *brport_dev)
316 {
317 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
318 	struct mlxsw_sp_bridge_device *bridge_device;
319 	struct mlxsw_sp_bridge_port *bridge_port;
320 	int err;
321 
322 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
323 	if (bridge_port) {
324 		bridge_port->ref_count++;
325 		return bridge_port;
326 	}
327 
328 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
329 	if (IS_ERR(bridge_device))
330 		return ERR_CAST(bridge_device);
331 
332 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
333 	if (!bridge_port) {
334 		err = -ENOMEM;
335 		goto err_bridge_port_create;
336 	}
337 
338 	return bridge_port;
339 
340 err_bridge_port_create:
341 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
342 	return ERR_PTR(err);
343 }
344 
/* Drop a reference to a bridge_port and destroy it when appropriate (see
 * mlxsw_sp_bridge_port_should_destroy()). Destroying the last port also
 * releases the bridge device.
 */
static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_port->ref_count--;
	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
		return;
	/* Save the device pointer, as the bridge port is freed next. */
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}
357 
358 static struct mlxsw_sp_port_vlan *
359 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
360 				  const struct mlxsw_sp_bridge_device *
361 				  bridge_device,
362 				  u16 vid)
363 {
364 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
365 
366 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
367 			    list) {
368 		if (!mlxsw_sp_port_vlan->bridge_port)
369 			continue;
370 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
371 		    bridge_device)
372 			continue;
373 		if (bridge_device->vlan_enabled &&
374 		    mlxsw_sp_port_vlan->vid != vid)
375 			continue;
376 		return mlxsw_sp_port_vlan;
377 	}
378 
379 	return NULL;
380 }
381 
382 static struct mlxsw_sp_port_vlan*
383 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
384 			       u16 fid_index)
385 {
386 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
387 
388 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
389 			    list) {
390 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
391 
392 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
393 			return mlxsw_sp_port_vlan;
394 	}
395 
396 	return NULL;
397 }
398 
399 static struct mlxsw_sp_bridge_vlan *
400 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
401 			  u16 vid)
402 {
403 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
404 
405 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
406 		if (bridge_vlan->vid == vid)
407 			return bridge_vlan;
408 	}
409 
410 	return NULL;
411 }
412 
413 static struct mlxsw_sp_bridge_vlan *
414 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
415 {
416 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
417 
418 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
419 	if (!bridge_vlan)
420 		return NULL;
421 
422 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
423 	bridge_vlan->vid = vid;
424 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
425 
426 	return bridge_vlan;
427 }
428 
/* Unlink and free a bridge_vlan; no {port, VLAN} may still reference it. */
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}
436 
437 static struct mlxsw_sp_bridge_vlan *
438 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
439 {
440 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
441 
442 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
443 	if (bridge_vlan)
444 		return bridge_vlan;
445 
446 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
447 }
448 
/* Release a bridge_vlan; like the bridge device, its lifetime is tracked
 * by list emptiness rather than an explicit refcount.
 */
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}
454 
455 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
456 					   struct net_device *dev,
457 					   unsigned long *brport_flags)
458 {
459 	struct mlxsw_sp_bridge_port *bridge_port;
460 
461 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
462 	if (WARN_ON(!bridge_port))
463 		return;
464 
465 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
466 }
467 
/* switchdev "get attribute" callback for mlxsw ports. */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		/* The ASIC's base MAC identifies the switch all of its
		 * ports belong to.
		 */
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
					       &attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		/* Bridge port flags the device is able to offload. */
		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
					       BR_MCAST_FLOOD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
494 
495 static int
496 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
497 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
498 				  u8 state)
499 {
500 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
501 
502 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
503 			    bridge_vlan_node) {
504 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
505 			continue;
506 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
507 						 bridge_vlan->vid, state);
508 	}
509 
510 	return 0;
511 }
512 
/* Set the STP state of a bridge port. The state is applied per VLAN in
 * the device; on failure, VLANs updated so far are reverted to the
 * port's previous state.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Recorded only after all VLANs were updated; until then it also
	 * serves as the rollback target below.
	 */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Roll back the VLANs already switched to the new state. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
551 
552 static int
553 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
554 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
555 				    enum mlxsw_sp_flood_type packet_type,
556 				    bool member)
557 {
558 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
559 
560 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
561 			    bridge_vlan_node) {
562 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
563 			continue;
564 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
565 					      packet_type,
566 					      mlxsw_sp_port->local_port,
567 					      member);
568 	}
569 
570 	return 0;
571 }
572 
/* Set @mlxsw_sp_port's @packet_type flood membership on every VLAN of
 * @bridge_port. On failure, membership is restored to !member for the
 * VLANs already processed.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Undo the VLANs updated before the failure. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
600 
601 static int
602 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
603 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
604 				       bool set)
605 {
606 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
607 	u16 vid = bridge_vlan->vid;
608 
609 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
610 			    bridge_vlan_node) {
611 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
612 			continue;
613 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
614 	}
615 
616 	return 0;
617 }
618 
/* Enable/disable learning on every VLAN of @bridge_port. On failure,
 * the VLANs already processed are reverted to !set.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Undo the VLANs updated before the failure. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
643 
644 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
645 					   struct switchdev_trans *trans,
646 					   struct net_device *orig_dev,
647 					   unsigned long brport_flags)
648 {
649 	struct mlxsw_sp_bridge_port *bridge_port;
650 	int err;
651 
652 	if (switchdev_trans_ph_prepare(trans))
653 		return 0;
654 
655 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
656 						orig_dev);
657 	if (!bridge_port)
658 		return 0;
659 
660 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
661 						   MLXSW_SP_FLOOD_TYPE_UC,
662 						   brport_flags & BR_FLOOD);
663 	if (err)
664 		return err;
665 
666 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
667 						brport_flags & BR_LEARNING);
668 	if (err)
669 		return err;
670 
671 	if (bridge_port->bridge_device->multicast_enabled)
672 		goto out;
673 
674 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
675 						   MLXSW_SP_FLOOD_TYPE_MC,
676 						   brport_flags &
677 						   BR_MCAST_FLOOD);
678 	if (err)
679 		return err;
680 
681 out:
682 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
683 	return 0;
684 }
685 
/* Program the FDB ageing time (in seconds, per the caller's conversion)
 * into the device via the SFDAT register; cache the value on success.
 */
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->bridge->ageing_time = ageing_time;
	return 0;
}
698 
699 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
700 					    struct switchdev_trans *trans,
701 					    unsigned long ageing_clock_t)
702 {
703 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
704 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
705 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
706 
707 	if (switchdev_trans_ph_prepare(trans)) {
708 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
709 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
710 			return -ERANGE;
711 		else
712 			return 0;
713 	}
714 
715 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
716 }
717 
718 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
719 					  struct switchdev_trans *trans,
720 					  struct net_device *orig_dev,
721 					  bool vlan_enabled)
722 {
723 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724 	struct mlxsw_sp_bridge_device *bridge_device;
725 
726 	if (!switchdev_trans_ph_prepare(trans))
727 		return 0;
728 
729 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
730 	if (WARN_ON(!bridge_device))
731 		return -EINVAL;
732 
733 	if (bridge_device->vlan_enabled == vlan_enabled)
734 		return 0;
735 
736 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
737 	return -EINVAL;
738 }
739 
740 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
741 					  struct switchdev_trans *trans,
742 					  struct net_device *orig_dev,
743 					  bool is_port_mrouter)
744 {
745 	struct mlxsw_sp_bridge_port *bridge_port;
746 	int err;
747 
748 	if (switchdev_trans_ph_prepare(trans))
749 		return 0;
750 
751 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
752 						orig_dev);
753 	if (!bridge_port)
754 		return 0;
755 
756 	if (!bridge_port->bridge_device->multicast_enabled)
757 		goto out;
758 
759 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
760 						   MLXSW_SP_FLOOD_TYPE_MC,
761 						   is_port_mrouter);
762 	if (err)
763 		return err;
764 
765 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
766 					 is_port_mrouter);
767 out:
768 	bridge_port->mrouter = is_port_mrouter;
769 	return 0;
770 }
771 
772 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
773 {
774 	const struct mlxsw_sp_bridge_device *bridge_device;
775 
776 	bridge_device = bridge_port->bridge_device;
777 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
778 					bridge_port->flags & BR_MCAST_FLOOD;
779 }
780 
/* React to the bridge toggling multicast snooping: sync MDB state and
 * re-evaluate the MC flood membership of every port of the bridge.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Only sync MDB state when the snooping setting actually changed. */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	/* Re-evaluate MC flooding for every port of the bridge per the new
	 * snooping state (see mlxsw_sp_mc_flood()).
	 */
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	/* NOTE(review): redundant - already assigned above whenever the
	 * state actually changed; kept as-is for fidelity.
	 */
	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
822 
/* Add/remove the router port to/from the multicast group (MID) @mid_idx
 * via the SMID register. The register payload is allocated dynamically;
 * presumably because of its size - it mirrors other SMID users.
 */
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx,
			    mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
839 
/* Add/remove the router port to/from every MDB entry of the bridge,
 * reflecting the bridge's own mrouter state.
 */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}
850 
851 static int
852 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
853 				  struct switchdev_trans *trans,
854 				  struct net_device *orig_dev,
855 				  bool is_mrouter)
856 {
857 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
858 	struct mlxsw_sp_bridge_device *bridge_device;
859 
860 	if (switchdev_trans_ph_prepare(trans))
861 		return 0;
862 
863 	/* It's possible we failed to enslave the port, yet this
864 	 * operation is executed due to it being deferred.
865 	 */
866 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
867 	if (!bridge_device)
868 		return 0;
869 
870 	if (bridge_device->mrouter != is_mrouter)
871 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
872 						   is_mrouter);
873 	bridge_device->mrouter = is_mrouter;
874 	return 0;
875 }
876 
/* switchdev "set attribute" callback: dispatch @attr to its handler.
 * Invoked for both prepare and commit phases; phase handling is
 * delegated to the individual handlers via @trans.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* NOTE(review): SPAN (mirroring) sessions are re-resolved after a
	 * committed change - presumably because attribute changes can
	 * affect them; confirm against mlxsw_sp_span_respin().
	 */
	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
929 
/* Join the {port, VLAN} to the FID backing its VID on @bridge_port's
 * bridge: take a FID reference, add the port to the UC/MC/BC flood
 * tables and map the {port, VID} to the FID. Unwinds in reverse order
 * on error.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unicast flooding follows the port's BR_FLOOD flag... */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* ...multicast follows the mrouter/BR_MCAST_FLOOD policy... */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* ...while broadcast is always flooded. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
979 
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap the {port, VID} from
 * the FID, pull the port out of the BC/MC/UC flood tables and drop the
 * FID reference. Clearing ->fid first marks the {port, VLAN} as no
 * longer associated with a FID.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
995 
996 static u16
997 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
998 			     u16 vid, bool is_pvid)
999 {
1000 	if (is_pvid)
1001 		return vid;
1002 	else if (mlxsw_sp_port->pvid == vid)
1003 		return 0;	/* Dis-allow untagged packets */
1004 	else
1005 		return mlxsw_sp_port->pvid;
1006 }
1007 
/* Attach a {port, VLAN} to a bridge port: join the FID, apply the port's
 * learning and STP configuration for the VID and link the {port, VLAN}
 * into the corresponding bridge VLAN. Unwinds in reverse order on error.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed.
	 * NOTE(review): the put here presumably pairs with a reference
	 * taken by the caller for this invocation - confirm.
	 */
	if (mlxsw_sp_port_vlan->bridge_port) {
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	/* Get-or-create the bridge VLAN tracking this VID on the port. */
	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Take a bridge port reference on behalf of this {port, VLAN}. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1060 
/* Detach a port VLAN from its bridge port, flushing FDB / MDB state when
 * this is the last user, and release the references taken on join.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only 802.1Q / 802.1D FIDs are bridge FIDs; anything else here
	 * indicates a driver bug.
	 */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Sample "last user" state before unlinking below. */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	/* Flush learned FDB records once no port uses this FID anymore. */
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	/* Flush MDB records once the last VLAN leaves the bridge port. */
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1096 
/* Configure a single VLAN on a bridge port: create / reference the port
 * VLAN, program VLAN membership and PVID, and join the bridge. Each step
 * is unwound in reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	/* Saved so the PVID can be restored on error. */
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
1134 
1135 static int
1136 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1137 				const struct net_device *br_dev,
1138 				const struct switchdev_obj_port_vlan *vlan)
1139 {
1140 	struct mlxsw_sp_rif *rif;
1141 	struct mlxsw_sp_fid *fid;
1142 	u16 pvid;
1143 	u16 vid;
1144 
1145 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1146 	if (!rif)
1147 		return 0;
1148 	fid = mlxsw_sp_rif_fid(rif);
1149 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1150 
1151 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1152 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1153 			if (vid != pvid) {
1154 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1155 				return -EBUSY;
1156 			}
1157 		} else {
1158 			if (vid == pvid) {
1159 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1160 				return -EBUSY;
1161 			}
1162 		}
1163 	}
1164 
1165 	return 0;
1166 }
1167 
/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN add handler. Bridge-master
 * notifications are only used to veto PVID changes that would break a
 * router interface; port notifications program the VLAN range during the
 * commit phase of the switchdev transaction.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* In the prepare phase, veto changes to the PVID used by a
		 * router interface on the VLAN-aware bridge.
		 */
		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		/* Bridge-master VLAN entries are otherwise not offloaded. */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	/* All the work happens in the commit phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLAN-unaware bridges use a single FID; nothing to program. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
1214 
1215 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1216 {
1217 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1218 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1219 }
1220 
1221 static int
1222 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1223 			       struct mlxsw_sp_bridge_port *bridge_port,
1224 			       u16 fid_index)
1225 {
1226 	bool lagged = bridge_port->lagged;
1227 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1228 	u16 system_port;
1229 
1230 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1231 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1232 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1233 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1234 
1235 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1236 }
1237 
1238 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1239 {
1240 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1241 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1242 }
1243 
1244 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1245 {
1246 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1247 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1248 }
1249 
/* Add or remove a unicast FDB record that forwards to a tunnel underlay
 * IP. Only IPv4 underlays are supported. Returns -EBUSY if the device
 * did not consume the record.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	/* The device decrements num_rec for records it did not process;
	 * a mismatch after the write means the record was not applied.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1294 
/* Write (or remove) a single unicast FDB record keyed to a local port.
 * @action selects forwarding behaviour (e.g. NOP vs. trap to router).
 * Returns -EBUSY if the device did not consume the record.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	/* Compare num_rec before/after the write to detect an unapplied
	 * record.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1323 
/* Regular unicast FDB record towards a local port (plain forwarding). */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
1331 
/* Static FDB record that redirects a router interface MAC to the IP
 * router (local port 0). Used by the router code.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
1339 
/* Write (or remove) a unicast FDB record keyed to a LAG. @lag_vid is the
 * VLAN used when egressing through the LAG. Returns -EBUSY if the device
 * did not consume the record.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	/* Compare num_rec before/after the write to detect an unapplied
	 * record.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1368 
1369 static int
1370 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1371 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1372 {
1373 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1374 	struct net_device *orig_dev = fdb_info->info.dev;
1375 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1376 	struct mlxsw_sp_bridge_device *bridge_device;
1377 	struct mlxsw_sp_bridge_port *bridge_port;
1378 	u16 fid_index, vid;
1379 
1380 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1381 	if (!bridge_port)
1382 		return -EINVAL;
1383 
1384 	bridge_device = bridge_port->bridge_device;
1385 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1386 							       bridge_device,
1387 							       fdb_info->vid);
1388 	if (!mlxsw_sp_port_vlan)
1389 		return 0;
1390 
1391 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1392 	vid = mlxsw_sp_port_vlan->vid;
1393 
1394 	if (!bridge_port->lagged)
1395 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1396 					       bridge_port->system_port,
1397 					       fdb_info->addr, fid_index,
1398 					       adding, false);
1399 	else
1400 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1401 						   bridge_port->lag_id,
1402 						   fdb_info->addr, fid_index,
1403 						   vid, adding, false);
1404 }
1405 
/* Write (or remove) a multicast FDB record pointing at MID @mid_idx.
 * Returns -EBUSY if the device did not consume the record.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	/* Compare num_rec before/after the write to detect an unapplied
	 * record.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1432 
/* Program a complete MID (multicast ID) entry: the egress port list is
 * taken from @ports_bitmap, plus optionally the router port. The mask
 * bits select which ports' membership the write affects.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	/* Enable the mask bit of every existing port so its membership is
	 * rewritten by this entry (ports not in the bitmap are cleared).
	 */
	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Set membership for the requested egress ports. */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	/* Router port joins the MID when multicast routing requires it. */
	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1463 
1464 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1465 				  u16 mid_idx, bool add)
1466 {
1467 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1468 	char *smid_pl;
1469 	int err;
1470 
1471 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1472 	if (!smid_pl)
1473 		return -ENOMEM;
1474 
1475 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1476 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1477 	kfree(smid_pl);
1478 	return err;
1479 }
1480 
1481 static struct
1482 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1483 				const unsigned char *addr,
1484 				u16 fid)
1485 {
1486 	struct mlxsw_sp_mid *mid;
1487 
1488 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1489 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1490 			return mid;
1491 	}
1492 	return NULL;
1493 }
1494 
1495 static void
1496 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1497 				      struct mlxsw_sp_bridge_port *bridge_port,
1498 				      unsigned long *ports_bitmap)
1499 {
1500 	struct mlxsw_sp_port *mlxsw_sp_port;
1501 	u64 max_lag_members, i;
1502 	int lag_id;
1503 
1504 	if (!bridge_port->lagged) {
1505 		set_bit(bridge_port->system_port, ports_bitmap);
1506 	} else {
1507 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1508 						     MAX_LAG_MEMBERS);
1509 		lag_id = bridge_port->lag_id;
1510 		for (i = 0; i < max_lag_members; i++) {
1511 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1512 								 lag_id, i);
1513 			if (mlxsw_sp_port)
1514 				set_bit(mlxsw_sp_port->local_port,
1515 					ports_bitmap);
1516 		}
1517 	}
1518 }
1519 
1520 static void
1521 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1522 				struct mlxsw_sp_bridge_device *bridge_device,
1523 				struct mlxsw_sp *mlxsw_sp)
1524 {
1525 	struct mlxsw_sp_bridge_port *bridge_port;
1526 
1527 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1528 		if (bridge_port->mrouter) {
1529 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1530 							      bridge_port,
1531 							      flood_bitmap);
1532 		}
1533 	}
1534 }
1535 
/* Commit an MDB entry to hardware: pick a free MID index, program the
 * egress port list (members plus mrouter ports) and the matching MC FDB
 * record. Returns true on success; the MID index is only reserved in the
 * bitmap once everything succeeded.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Egress list = member ports plus all mrouter ports. */
	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
1577 
1578 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1579 					struct mlxsw_sp_mid *mid)
1580 {
1581 	if (!mid->in_hw)
1582 		return 0;
1583 
1584 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1585 	mid->in_hw = false;
1586 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1587 				    false);
1588 }
1589 
/* Allocate and register a new MDB entry for {addr, fid}. The entry is
 * written to hardware only when multicast snooping is enabled on the
 * bridge; otherwise it is tracked in software and committed later by
 * mlxsw_sp_bridge_mdb_mc_enable_sync(). Returns NULL on failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* One bit per possible local port. */
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	/* Software-only entry while snooping is disabled. */
	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
1630 
1631 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1632 					 struct mlxsw_sp_mid *mid)
1633 {
1634 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1635 	int err = 0;
1636 
1637 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1638 	if (bitmap_empty(mid->ports_in_mid,
1639 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1640 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1641 		list_del(&mid->list);
1642 		kfree(mid->ports_in_mid);
1643 		kfree(mid);
1644 	}
1645 	return err;
1646 }
1647 
/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB add handler: join the port to the
 * (possibly newly created) MDB entry for {MAC, FID}.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	/* All the work happens in the commit phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* With snooping disabled, membership is tracked in software only
	 * and synced to hardware when snooping is re-enabled.
	 */
	if (!bridge_device->multicast_enabled)
		return 0;

	/* An mrouter port is already part of every MID's egress list. */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
1707 
1708 static void
1709 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1710 				   struct mlxsw_sp_bridge_device
1711 				   *bridge_device)
1712 {
1713 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1714 	struct mlxsw_sp_mid *mid;
1715 	bool mc_enabled;
1716 
1717 	mc_enabled = bridge_device->multicast_enabled;
1718 
1719 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1720 		if (mc_enabled)
1721 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1722 						    bridge_device);
1723 		else
1724 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1725 	}
1726 }
1727 
1728 static void
1729 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1730 				 struct mlxsw_sp_bridge_port *bridge_port,
1731 				 bool add)
1732 {
1733 	struct mlxsw_sp_bridge_device *bridge_device;
1734 	struct mlxsw_sp_mid *mid;
1735 
1736 	bridge_device = bridge_port->bridge_device;
1737 
1738 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1739 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1740 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1741 	}
1742 }
1743 
/* Deferred-work context for re-evaluating SPAN (mirroring) entries. */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;	/* device the respin applies to */
};
1748 
/* Work handler: re-evaluate SPAN entries under RTNL and free the
 * one-shot work item allocated by mlxsw_sp_span_respin_schedule().
 */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span_respin_work *respin_work =
		container_of(work, struct mlxsw_sp_span_respin_work, work);

	/* mlxsw_sp_span_respin() requires RTNL. */
	rtnl_lock();
	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
	rtnl_unlock();
	kfree(respin_work);
}
1759 
/* Schedule a deferred SPAN respin. Best effort: allocation failure is
 * silently ignored. GFP_ATOMIC because callers may run in atomic
 * (notifier) context.
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	mlxsw_core_schedule_work(&respin_work->work);
}
1773 
/* switchdev object-add entry point: dispatch VLAN and MDB additions to
 * their handlers; all other object types are not offloaded.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
1808 
1809 static void
1810 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1811 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1812 {
1813 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
1814 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1815 
1816 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1817 	if (WARN_ON(!mlxsw_sp_port_vlan))
1818 		return;
1819 
1820 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1821 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1822 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1823 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1824 }
1825 
/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN del handler: remove the VLAN
 * range from the bridge port. Bridge-master entries are not offloaded.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLAN-unaware bridges use a single FID; nothing was programmed. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
1849 
/* Remove a port from an MDB entry: drop it from the MID's egress list
 * (unless it stays there as an mrouter port) and from the software
 * membership. Errors are logged but teardown continues best-effort.
 */
static int
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* An mrouter port keeps its SMID membership regardless of MDB. */
	if (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		if (err)
			netdev_err(dev, "Unable to remove port from SMID\n");
	}

	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	if (err)
		netdev_err(dev, "Unable to remove MC SFD\n");

	return err;
}
1871 
/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB del handler: remove the port from
 * the MDB entry matching {MAC, FID}.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
1905 
/* Remove a departing port from all MDB state of its bridge: MDB entries
 * it is a member of, and - for an mrouter port - the SMID membership it
 * was given in the remaining entries.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	/* _safe: __mlxsw_sp_port_mdb_del() may free the entry. */
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}
1925 
/* switchdev object-del entry point: dispatch VLAN and MDB deletions and
 * schedule a SPAN respin to react to the changed bridge topology.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1950 
1951 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1952 						   u16 lag_id)
1953 {
1954 	struct mlxsw_sp_port *mlxsw_sp_port;
1955 	u64 max_lag_members;
1956 	int i;
1957 
1958 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1959 					     MAX_LAG_MEMBERS);
1960 	for (i = 0; i < max_lag_members; i++) {
1961 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1962 		if (mlxsw_sp_port)
1963 			return mlxsw_sp_port;
1964 	}
1965 	return NULL;
1966 }
1967 
/* switchdev callbacks registered on every mlxsw_sp port netdevice. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};
1974 
/* Join a port to a VLAN-aware (802.1Q) bridge. The bridge manages the
 * port's VLANs from now on, so the default VLAN reference is released.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1997 
/* Leave a VLAN-aware bridge: re-take the default VLAN reference dropped
 * on join and restore PVID 1.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
2007 
/* VxLAN devices are not supported under a VLAN-aware bridge; reaching
 * this callback indicates a driver bug, hence the WARN.
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev,
				 struct netlink_ext_ack *extack)
{
	WARN_ON(1);
	return -EINVAL;
}
2016 
/* Intentionally empty: vxlan_join always fails for VLAN-aware bridges,
 * so there is nothing to undo here.
 */
static void
mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev)
{
}
2022 
2023 static struct mlxsw_sp_fid *
2024 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2025 			      u16 vid)
2026 {
2027 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2028 
2029 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2030 }
2031 
/* FID lookup is only needed for VxLAN handling, which is unsupported on
 * VLAN-aware bridges; this callback is not expected to be invoked.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	WARN_ON(1);
	return NULL;
}
2039 
/* Map a FID back to the VID it represents in a VLAN-aware bridge. */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2046 
/* Per-bridge operations for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.vxlan_leave	= mlxsw_sp_bridge_8021q_vxlan_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2056 
2057 static bool
2058 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2059 			   const struct net_device *br_dev)
2060 {
2061 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2062 
2063 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2064 			    list) {
2065 		if (mlxsw_sp_port_vlan->bridge_port &&
2066 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2067 		    br_dev)
2068 			return true;
2069 	}
2070 
2071 	return false;
2072 }
2073 
/* Join a port (or one of its VLAN uppers) to a VLAN-unaware (802.1D)
 * bridge. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	/* A VLAN upper maps to its VID; the port itself uses the default
	 * VID 1.
	 */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Reject a second VLAN upper of the same port in the same bridge */
	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
2100 
/* Undo mlxsw_sp_bridge_8021d_port_join() for the matching {Port, VID}. */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	/* Same VID selection as on the join path */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
2117 
2118 static int
2119 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2120 				 const struct net_device *vxlan_dev,
2121 				 struct netlink_ext_ack *extack)
2122 {
2123 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2124 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2125 	struct mlxsw_sp_nve_params params = {
2126 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2127 		.vni = vxlan->cfg.vni,
2128 		.dev = vxlan_dev,
2129 	};
2130 	struct mlxsw_sp_fid *fid;
2131 	int err;
2132 
2133 	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2134 	if (!fid)
2135 		return -EINVAL;
2136 
2137 	if (mlxsw_sp_fid_vni_is_set(fid))
2138 		return -EINVAL;
2139 
2140 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2141 	if (err)
2142 		goto err_nve_fid_enable;
2143 
2144 	/* The tunnel port does not hold a reference on the FID. Only
2145 	 * local ports and the router port
2146 	 */
2147 	mlxsw_sp_fid_put(fid);
2148 
2149 	return 0;
2150 
2151 err_nve_fid_enable:
2152 	mlxsw_sp_fid_put(fid);
2153 	return err;
2154 }
2155 
/* Disable VxLAN (NVE) offload on the FID of a VLAN-unaware bridge. */
static void
mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct mlxsw_sp_fid *fid;

	/* Lookup takes a reference; dropped on both exits below */
	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
	if (WARN_ON(!fid))
		return;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	if (!mlxsw_sp_fid_vni_is_set(fid))
		goto out;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
out:
	mlxsw_sp_fid_put(fid);
}
2175 
/* Get (and take a reference on) the 802.1D FID of the bridge. If a running
 * VxLAN device is already enslaved to the bridge, also enable NVE offload
 * on the FID. Returns the FID or an ERR_PTR().
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	/* The FID is keyed by the bridge's ifindex, not by VID */
	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
	if (IS_ERR(fid))
		return fid;

	/* VNI already mapped -- nothing more to set up */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
	if (!vxlan_dev)
		return fid;

	/* An administratively down VxLAN device is not offloaded yet */
	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}
2209 
2210 static struct mlxsw_sp_fid *
2211 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2212 				 u16 vid)
2213 {
2214 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2215 
2216 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2217 	if (vid)
2218 		return NULL;
2219 
2220 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2221 }
2222 
/* A VLAN-unaware bridge always reports VID 0 for any of its FIDs. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2229 
/* Per-bridge operations for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.vxlan_leave	= mlxsw_sp_bridge_8021d_vxlan_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2239 
/* Enslave a port to a bridge. Takes a reference on the bridge port
 * (created on first use) and dispatches to the bridge-type specific join.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	/* Drop the reference taken by mlxsw_sp_bridge_port_get() above */
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2266 
/* Unlink a port from a bridge: dispatch the type-specific leave and drop
 * the bridge port reference taken on join. Silently returns if the bridge
 * or bridge port is unknown.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2286 
/* Offload a VxLAN device enslaved to an offloaded bridge. Dispatches to
 * the bridge-type specific vxlan_join. Returns a negative errno on error.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	/* Callers only invoke this for bridges known to the driver */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
}
2300 
/* Stop offloading a VxLAN device enslaved to an offloaded bridge. */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *br_dev,
				 const struct net_device *vxlan_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	/* Callers only invoke this for bridges known to the driver */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return;

	bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
}
2313 
2314 static void
2315 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2316 			    const char *mac, u16 vid,
2317 			    struct net_device *dev, bool offloaded)
2318 {
2319 	struct switchdev_notifier_fdb_info info;
2320 
2321 	info.addr = mac;
2322 	info.vid = vid;
2323 	info.offloaded = offloaded;
2324 	call_switchdev_notifiers(type, dev, &info.info);
2325 }
2326 
/* Process one learned/aged-out MAC record from an SFN query for a non-LAG
 * port: write the entry back to the device and notify the bridge driver.
 * Records that cannot be mapped to a known {Port, VID} are removed from
 * the device without notification.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge is notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	/* Reflect the record back to the device's FDB */
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Unmappable record: remove from the device, skip the notifiers */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2384 
/* LAG counterpart of mlxsw_sp_fdb_notify_mac_process(): process one
 * learned/aged-out MAC record keyed by LAG ID instead of local port.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	/* Any member port can represent the LAG for this purpose */
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge is notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	/* Reflect the record back to the device's FDB */
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Unmappable record: remove from the device, skip the notifiers */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2444 
/* Dispatch one SFN record to the matching handler. Record types not
 * listed below are silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}
2467 
2468 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2469 {
2470 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2471 
2472 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2473 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2474 }
2475 
/* Periodic work: query the device for FDB notification records (SFN),
 * process each record under RTNL, then re-arm itself.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	/* Allocate before taking RTNL; bail silently on failure (the work
	 * is simply not re-armed on this path).
	 */
	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	/* RTNL serializes record processing against bridge reconfiguration */
	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
2508 
/* Deferred-work context for a switchdev notifier event. The union holds a
 * copy of the notifier payload: fdb_info for bridge FDB events,
 * vxlan_fdb_info for VxLAN FDB events. dev is held (dev_hold) for the
 * lifetime of the work item.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;
	unsigned long event;
};
2518 
2519 static void
2520 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2521 				      enum mlxsw_sp_l3proto *proto,
2522 				      union mlxsw_sp_l3addr *addr)
2523 {
2524 	if (vxlan_addr->sa.sa_family == AF_INET) {
2525 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2526 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2527 	} else {
2528 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2529 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2530 	}
2531 }
2532 
/* Handle a bridge FDB add/del that points at a VxLAN device: program or
 * remove the corresponding tunnel unicast entry in the device and report
 * offload status back to both the VxLAN and bridge drivers.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	/* Resolve the MAC via the VxLAN driver's own FDB; nothing to do if
	 * it has no matching unicast entry.
	 */
	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Deletion is best-effort; err is intentionally ignored */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		break;
	}
}
2581 
/* Validate a deferred bridge FDB event whose target is a VxLAN device and,
 * if the bridge/FID/VNI chain checks out, forward it for tunnel FDB
 * processing. All failures are silent -- the event is simply not offloaded.
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only user-installed entries are offloaded through this path */
	if (!switchdev_work->fdb_info.added_by_user)
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* fid_lookup takes a reference; released at 'out' below */
	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
2630 
/* Deferred handler for bridge FDB events. Runs under RTNL; routes VxLAN
 * targets to the NVE path and programs port FDB entries otherwise. Frees
 * the work item and releases the device reference taken at enqueue time.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* Tunnel devices are handled separately from front-panel ports */
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-installed entries are programmed to the device */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	/* Release resources taken in mlxsw_sp_switchdev_event() */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2682 
/* Handle a VxLAN FDB add: program either a flood (all-zeros MAC) entry or
 * a tunnel unicast entry, then report offload status. All failures are
 * silent -- the entry just stays un-offloaded.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* Lookup takes a reference; put on every path below */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* The all-zeros MAC entry describes the flood (BUM) destination */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
2754 
/* Handle a VxLAN FDB delete: remove the flood (all-zeros MAC) or tunnel
 * unicast entry from the device and report the entry as un-offloaded.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* Lookup takes a reference; put on every path below */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* The all-zeros MAC entry describes the flood (BUM) destination */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* Removal is best-effort; the return value is intentionally ignored */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false);

	mlxsw_sp_fid_put(fid);
}
2798 
/* Deferred handler for VxLAN FDB events. Re-validates the device/bridge
 * topology under RTNL (it may have changed since enqueue) before
 * dispatching, then frees the work item and drops the device reference.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	/* Release resources taken in mlxsw_sp_switchdev_event() */
	kfree(switchdev_work);
	dev_put(dev);
}
2834 
/* Validate a VxLAN FDB notification against the device's configuration
 * and copy its payload into the work item. Returns -EOPNOTSUPP for any
 * entry the device cannot offload, 0 otherwise.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;

	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	/* Only entries matching the device's own port/VNI configuration,
	 * without an explicit egress ifindex, are supported.
	 */
	if (vxlan_fdb_info->remote_port != cfg->dst_port)
		return -EOPNOTSUPP;
	if (vxlan_fdb_info->remote_vni != cfg->vni)
		return -EOPNOTSUPP;
	if (vxlan_fdb_info->vni != cfg->vni)
		return -EOPNOTSUPP;
	if (vxlan_fdb_info->remote_ifindex)
		return -EOPNOTSUPP;
	/* Multicast MACs and multicast remote IPs are not offloaded */
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
		return -EOPNOTSUPP;
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
		return -EOPNOTSUPP;

	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
2865 
/* Called under rcu_read_lock() */
/* switchdev notifier callback: runs in atomic context, so it only copies
 * the event payload, takes a device reference and defers the real work to
 * process context. Returns NOTIFY_DONE, or NOTIFY_BAD on allocation or
 * validation failure.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* GFP_ATOMIC: notifier runs in atomic (RCU) context */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Deep-copy the MAC: the notifier's buffer is not ours to
		 * keep past this callback.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
2940 
/* Registered in mlxsw_sp_fdb_init(), unregistered in mlxsw_sp_fdb_fini() */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
2944 
/* Accessor: return the cached STP state of a bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
2950 
/* Set the default ageing time, register the switchdev notifier and start
 * the periodic FDB notification polling. Returns 0 or a negative errno.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
2973 
2974 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
2975 {
2976 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
2977 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2978 
2979 }
2980 
2981 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
2982 {
2983 	struct mlxsw_sp_bridge *bridge;
2984 
2985 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
2986 	if (!bridge)
2987 		return -ENOMEM;
2988 	mlxsw_sp->bridge = bridge;
2989 	bridge->mlxsw_sp = mlxsw_sp;
2990 
2991 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
2992 
2993 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
2994 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
2995 
2996 	return mlxsw_sp_fdb_init(mlxsw_sp);
2997 }
2998 
/* Tear down FDB handling and free the bridge state; all offloaded bridges
 * are expected to be gone by now (hence the WARN).
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
3005 
/* Install the switchdev ops on a port's netdev. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
3010 
/* Nothing to tear down per port; kept for symmetry with the init call. */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
3014