1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
/* Per-ASIC bridge state: the list of offloaded bridge devices, MID
 * (multicast group) index allocation and global FDB parameters.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	/* Periodic FDB notification processing; 'interval' is the delay
	 * between runs, in milliseconds.
	 */
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time; /* seconds; see mlxsw_sp_ageing_set() */
	/* Only one VLAN-aware (802.1Q) bridge may be offloaded at a time */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
45 
/* An offloaded Linux bridge device. A VLAN-aware bridge uses the
 * 802.1Q ops, a VLAN-unaware one the 802.1D ops.
 */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the bridge netdev */
	struct list_head list;		/* node in mlxsw_sp_bridge::bridges_list */
	struct list_head ports_list;	/* enslaved bridge ports */
	struct list_head mids_list;	/* multicast group (MID) entries */
	u8 vlan_enabled:1,	/* bridge is VLAN-aware */
	   multicast_enabled:1,	/* multicast snooping enabled */
	   mrouter:1;		/* bridge itself acts as a multicast router */
	const struct mlxsw_sp_bridge_ops *ops;
};
56 
/* A netdev (port or LAG) enslaved to an offloaded bridge. Reference
 * counted; see mlxsw_sp_bridge_port_{get,put}().
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the bridge port netdev */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* node in bridge_device::ports_list */
	struct list_head vlans_list;	/* struct mlxsw_sp_bridge_vlan entries */
	unsigned int ref_count;
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;
	bool lagged;			/* selects the union member below */
	union {
		u16 lag_id;		/* valid when lagged */
		u16 system_port;	/* valid when not lagged */
	};
};
72 
/* A VLAN configured on a bridge port; collects the port VLANs that
 * use this VID on the port.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port::vlans_list */
	struct list_head port_vlan_list; /* struct mlxsw_sp_port_vlan entries */
	u16 vid;
};
78 
/* Bridge-type specific operations (802.1Q vs. 802.1D). */
struct mlxsw_sp_bridge_ops {
	/* Enslave / release a port to / from the bridge */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Attach / detach a VxLAN device to / from the bridge */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev,
			  struct netlink_ext_ack *extack);
	void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *vxlan_dev);
	/* FID (filtering identifier) management; fid_get takes a
	 * reference, fid_lookup only finds an existing FID.
	 */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
101 
102 static int
103 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
104 			       struct mlxsw_sp_bridge_port *bridge_port,
105 			       u16 fid_index);
106 
107 static void
108 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
109 			       struct mlxsw_sp_bridge_port *bridge_port);
110 
111 static void
112 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
113 				   struct mlxsw_sp_bridge_device
114 				   *bridge_device);
115 
116 static void
117 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
118 				 struct mlxsw_sp_bridge_port *bridge_port,
119 				 bool add);
120 
121 static struct mlxsw_sp_bridge_device *
122 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
123 			    const struct net_device *br_dev)
124 {
125 	struct mlxsw_sp_bridge_device *bridge_device;
126 
127 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
128 		if (bridge_device->dev == br_dev)
129 			return bridge_device;
130 
131 	return NULL;
132 }
133 
134 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
135 					 const struct net_device *br_dev)
136 {
137 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
138 }
139 
/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF (router
 * interface) associated with @dev. Returns 0 so the walk continues
 * over all uppers.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
148 
/* Destroy the RIF of the bridge device itself, then the RIFs of all
 * its upper devices (e.g., VLAN uppers of the bridge).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}
157 
158 static struct mlxsw_sp_bridge_device *
159 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
160 			      struct net_device *br_dev)
161 {
162 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
163 	struct mlxsw_sp_bridge_device *bridge_device;
164 	bool vlan_enabled = br_vlan_enabled(br_dev);
165 
166 	if (vlan_enabled && bridge->vlan_enabled_exists) {
167 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
168 		return ERR_PTR(-EINVAL);
169 	}
170 
171 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
172 	if (!bridge_device)
173 		return ERR_PTR(-ENOMEM);
174 
175 	bridge_device->dev = br_dev;
176 	bridge_device->vlan_enabled = vlan_enabled;
177 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
178 	bridge_device->mrouter = br_multicast_router(br_dev);
179 	INIT_LIST_HEAD(&bridge_device->ports_list);
180 	if (vlan_enabled) {
181 		bridge->vlan_enabled_exists = true;
182 		bridge_device->ops = bridge->bridge_8021q_ops;
183 	} else {
184 		bridge_device->ops = bridge->bridge_8021d_ops;
185 	}
186 	INIT_LIST_HEAD(&bridge_device->mids_list);
187 	list_add(&bridge_device->list, &bridge->bridges_list);
188 
189 	return bridge_device;
190 }
191 
/* Tear down an offloaded bridge device. Expects all ports and MDB
 * entries to have been released already (warns otherwise).
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	/* Allow a new VLAN-aware bridge to be offloaded */
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
205 
/* Return the device instance for @br_dev, creating it on first use.
 * May return ERR_PTR() from the create path.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev);

	return bridge_device;
}
218 
219 static void
220 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
221 			   struct mlxsw_sp_bridge_device *bridge_device)
222 {
223 	if (list_empty(&bridge_device->ports_list))
224 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
225 }
226 
227 static struct mlxsw_sp_bridge_port *
228 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
229 			    const struct net_device *brport_dev)
230 {
231 	struct mlxsw_sp_bridge_port *bridge_port;
232 
233 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
234 		if (bridge_port->dev == brport_dev)
235 			return bridge_port;
236 	}
237 
238 	return NULL;
239 }
240 
241 struct mlxsw_sp_bridge_port *
242 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
243 			  struct net_device *brport_dev)
244 {
245 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
246 	struct mlxsw_sp_bridge_device *bridge_device;
247 
248 	if (!br_dev)
249 		return NULL;
250 
251 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
252 	if (!bridge_device)
253 		return NULL;
254 
255 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
256 }
257 
258 static struct mlxsw_sp_bridge_port *
259 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
260 			    struct net_device *brport_dev)
261 {
262 	struct mlxsw_sp_bridge_port *bridge_port;
263 	struct mlxsw_sp_port *mlxsw_sp_port;
264 
265 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
266 	if (!bridge_port)
267 		return NULL;
268 
269 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
270 	bridge_port->lagged = mlxsw_sp_port->lagged;
271 	if (bridge_port->lagged)
272 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
273 	else
274 		bridge_port->system_port = mlxsw_sp_port->local_port;
275 	bridge_port->dev = brport_dev;
276 	bridge_port->bridge_device = bridge_device;
277 	bridge_port->stp_state = BR_STATE_DISABLED;
278 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
279 			     BR_MCAST_FLOOD;
280 	INIT_LIST_HEAD(&bridge_port->vlans_list);
281 	list_add(&bridge_port->list, &bridge_device->ports_list);
282 	bridge_port->ref_count = 1;
283 
284 	return bridge_port;
285 }
286 
287 static void
288 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
289 {
290 	list_del(&bridge_port->list);
291 	WARN_ON(!list_empty(&bridge_port->vlans_list));
292 	kfree(bridge_port);
293 }
294 
295 static bool
296 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
297 				    bridge_port)
298 {
299 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
300 
301 	/* In case ports were pulled from out of a bridged LAG, then
302 	 * it's possible the reference count isn't zero, yet the bridge
303 	 * port should be destroyed, as it's no longer an upper of ours.
304 	 */
305 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
306 		return true;
307 	else if (bridge_port->ref_count == 0)
308 		return true;
309 	else
310 		return false;
311 }
312 
313 static struct mlxsw_sp_bridge_port *
314 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
315 			 struct net_device *brport_dev)
316 {
317 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
318 	struct mlxsw_sp_bridge_device *bridge_device;
319 	struct mlxsw_sp_bridge_port *bridge_port;
320 	int err;
321 
322 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
323 	if (bridge_port) {
324 		bridge_port->ref_count++;
325 		return bridge_port;
326 	}
327 
328 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
329 	if (IS_ERR(bridge_device))
330 		return ERR_CAST(bridge_device);
331 
332 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
333 	if (!bridge_port) {
334 		err = -ENOMEM;
335 		goto err_bridge_port_create;
336 	}
337 
338 	return bridge_port;
339 
340 err_bridge_port_create:
341 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
342 	return ERR_PTR(err);
343 }
344 
345 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
346 				     struct mlxsw_sp_bridge_port *bridge_port)
347 {
348 	struct mlxsw_sp_bridge_device *bridge_device;
349 
350 	bridge_port->ref_count--;
351 	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
352 		return;
353 	bridge_device = bridge_port->bridge_device;
354 	mlxsw_sp_bridge_port_destroy(bridge_port);
355 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
356 }
357 
358 static struct mlxsw_sp_port_vlan *
359 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
360 				  const struct mlxsw_sp_bridge_device *
361 				  bridge_device,
362 				  u16 vid)
363 {
364 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
365 
366 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
367 			    list) {
368 		if (!mlxsw_sp_port_vlan->bridge_port)
369 			continue;
370 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
371 		    bridge_device)
372 			continue;
373 		if (bridge_device->vlan_enabled &&
374 		    mlxsw_sp_port_vlan->vid != vid)
375 			continue;
376 		return mlxsw_sp_port_vlan;
377 	}
378 
379 	return NULL;
380 }
381 
382 static struct mlxsw_sp_port_vlan*
383 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
384 			       u16 fid_index)
385 {
386 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
387 
388 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
389 			    list) {
390 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
391 
392 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
393 			return mlxsw_sp_port_vlan;
394 	}
395 
396 	return NULL;
397 }
398 
399 static struct mlxsw_sp_bridge_vlan *
400 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
401 			  u16 vid)
402 {
403 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
404 
405 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
406 		if (bridge_vlan->vid == vid)
407 			return bridge_vlan;
408 	}
409 
410 	return NULL;
411 }
412 
413 static struct mlxsw_sp_bridge_vlan *
414 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
415 {
416 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
417 
418 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
419 	if (!bridge_vlan)
420 		return NULL;
421 
422 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
423 	bridge_vlan->vid = vid;
424 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
425 
426 	return bridge_vlan;
427 }
428 
/* Unlink and free a bridge VLAN; all its port VLANs must already have
 * left (warns otherwise).
 */
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}
436 
437 static struct mlxsw_sp_bridge_vlan *
438 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
439 {
440 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
441 
442 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
443 	if (bridge_vlan)
444 		return bridge_vlan;
445 
446 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
447 }
448 
449 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
450 {
451 	if (list_empty(&bridge_vlan->port_vlan_list))
452 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
453 }
454 
455 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
456 					   struct net_device *dev,
457 					   unsigned long *brport_flags)
458 {
459 	struct mlxsw_sp_bridge_port *bridge_port;
460 
461 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
462 	if (WARN_ON(!bridge_port))
463 		return;
464 
465 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
466 }
467 
/* switchdev attribute "get" handler: report the parent switch ID, the
 * cached bridge port flags and the set of flags we can offload.
 */
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		/* The switch's base MAC identifies the parent switch */
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
					       &attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
					       BR_MCAST_FLOOD;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
494 
495 static int
496 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
497 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
498 				  u8 state)
499 {
500 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
501 
502 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
503 			    bridge_vlan_node) {
504 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
505 			continue;
506 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
507 						 bridge_vlan->vid, state);
508 	}
509 
510 	return 0;
511 }
512 
/* SWITCHDEV_ATTR_ID_PORT_STP_STATE handler: program the new STP state
 * on every VLAN of the bridge port, rolling back to the previous
 * state on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* Nothing to validate during the prepare phase */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Cache the state so later VLAN joins inherit it */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the old state on the VLANs already updated */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
551 
552 static int
553 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
554 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
555 				    enum mlxsw_sp_flood_type packet_type,
556 				    bool member)
557 {
558 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
559 
560 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
561 			    bridge_vlan_node) {
562 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
563 			continue;
564 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
565 					      packet_type,
566 					      mlxsw_sp_port->local_port,
567 					      member);
568 	}
569 
570 	return 0;
571 }
572 
/* Apply @member to the @packet_type flood tables of all VLANs on the
 * bridge port, undoing the change on the already-processed VLANs if
 * one of them fails.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Roll back with the inverse setting on the VLANs done so far */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
600 
601 static int
602 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
603 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
604 				       bool set)
605 {
606 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
607 	u16 vid = bridge_vlan->vid;
608 
609 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
610 			    bridge_vlan_node) {
611 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
612 			continue;
613 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
614 	}
615 
616 	return 0;
617 }
618 
/* Apply the learning setting to all VLANs on the bridge port, undoing
 * the change on the already-processed VLANs if one of them fails.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Roll back with the inverse setting on the VLANs done so far */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
643 
/* SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS handler: sync BR_FLOOD,
 * BR_LEARNING and BR_MCAST_FLOOD to the device and cache the new
 * flags on success.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* The port may have failed to be enslaved; see STP handler */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With multicast snooping enabled, MC flooding is governed by
	 * the mrouter state rather than BR_MCAST_FLOOD; only record
	 * the flag in that case.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}
685 
686 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
687 {
688 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
689 	int err;
690 
691 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
692 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
693 	if (err)
694 		return err;
695 	mlxsw_sp->bridge->ageing_time = ageing_time;
696 	return 0;
697 }
698 
699 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
700 					    struct switchdev_trans *trans,
701 					    unsigned long ageing_clock_t)
702 {
703 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
704 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
705 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
706 
707 	if (switchdev_trans_ph_prepare(trans)) {
708 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
709 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
710 			return -ERANGE;
711 		else
712 			return 0;
713 	}
714 
715 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
716 }
717 
718 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
719 					  struct switchdev_trans *trans,
720 					  struct net_device *orig_dev,
721 					  bool vlan_enabled)
722 {
723 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
724 	struct mlxsw_sp_bridge_device *bridge_device;
725 
726 	if (!switchdev_trans_ph_prepare(trans))
727 		return 0;
728 
729 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
730 	if (WARN_ON(!bridge_device))
731 		return -EINVAL;
732 
733 	if (bridge_device->vlan_enabled == vlan_enabled)
734 		return 0;
735 
736 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
737 	return -EINVAL;
738 }
739 
/* SWITCHDEV_ATTR_ID_PORT_MROUTER handler: track the port's mrouter
 * state and, when multicast snooping is enabled, adjust MC flooding
 * and MDB membership accordingly.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* The port may have failed to be enslaved; see STP handler */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	/* Without snooping, MC flooding follows BR_MCAST_FLOOD instead
	 * of the mrouter state; just record the flag.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
771 
772 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
773 {
774 	const struct mlxsw_sp_bridge_device *bridge_device;
775 
776 	bridge_device = bridge_port->bridge_device;
777 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
778 					bridge_port->flags & BR_MCAST_FLOOD;
779 }
780 
/* SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED handler: toggle multicast
 * snooping on the bridge and re-evaluate MC flooding for every
 * bridge port accordingly.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Update the state before syncing the MDB so mlxsw_sp_mc_flood()
	 * below already sees the new snooping mode.
	 */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	/* NOTE(review): multicast_enabled was already assigned in the
	 * conditional above, making this assignment look redundant —
	 * confirm whether it can be dropped.
	 */
	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
822 
823 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
824 					 u16 mid_idx, bool add)
825 {
826 	char *smid_pl;
827 	int err;
828 
829 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
830 	if (!smid_pl)
831 		return -ENOMEM;
832 
833 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
834 			    mlxsw_sp_router_port(mlxsw_sp), add);
835 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
836 	kfree(smid_pl);
837 	return err;
838 }
839 
/* Update the router port's membership in every MDB entry (MID) of the
 * bridge, following the bridge's mrouter state.
 */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}
850 
851 static int
852 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
853 				  struct switchdev_trans *trans,
854 				  struct net_device *orig_dev,
855 				  bool is_mrouter)
856 {
857 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
858 	struct mlxsw_sp_bridge_device *bridge_device;
859 
860 	if (switchdev_trans_ph_prepare(trans))
861 		return 0;
862 
863 	/* It's possible we failed to enslave the port, yet this
864 	 * operation is executed due to it being deferred.
865 	 */
866 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
867 	if (!bridge_device)
868 		return 0;
869 
870 	if (bridge_device->mrouter != is_mrouter)
871 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
872 						   is_mrouter);
873 	bridge_device->mrouter = is_mrouter;
874 	return 0;
875 }
876 
/* switchdev attribute "set" entry point: dispatch to the per-attribute
 * handler and re-evaluate mirroring (SPAN) state after a committed
 * change.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Flood/forwarding changes can affect SPAN; respin on commit */
	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
929 
/* Join the port VLAN to the FID (filtering identifier) of the bridge
 * (and VID, for a VLAN-aware bridge): configure UC/MC/BC flood table
 * membership and map the {port, VID} pair to the FID. On failure the
 * steps are undone in reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	/* Takes a reference on the FID; released in fid_leave() */
	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge port members */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
979 
/* Detach the port VLAN from its FID, mirroring fid_join() in reverse:
 * unmap the {port, VID} pair, clear flood table membership and drop
 * the FID reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
995 
996 static u16
997 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
998 			     u16 vid, bool is_pvid)
999 {
1000 	if (is_pvid)
1001 		return vid;
1002 	else if (mlxsw_sp_port->pvid == vid)
1003 		return 0;	/* Dis-allow untagged packets */
1004 	else
1005 		return mlxsw_sp_port->pvid;
1006 }
1007 
/* Link a port VLAN to a bridge port: join the FID, apply the bridge
 * port's learning and STP settings to the VID and hook the port VLAN
 * into the bridge VLAN. Failures are unwound in reverse order.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed; drop the
	 * reference presumably taken by the caller — TODO confirm.
	 */
	if (mlxsw_sp_port_vlan->bridge_port) {
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port for as long as the port VLAN uses it */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1060 
/* Unbind a {Port, VID} from its bridge port, undoing
 * mlxsw_sp_port_vlan_bridge_join(). FDB / MDB entries are flushed when
 * the last user of the FID / bridge port goes away.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged (802.1Q / 802.1D) FIDs are handled here */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Sample list states before unlinking, to decide below whether
	 * FDB / MDB flushes are needed.
	 */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	/* Last {Port, VID} on this bridge VLAN: flush FDB entries
	 * learned on this FID for this bridge port.
	 */
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	/* Last VLAN on this bridge port: flush its MDB entries */
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken in mlxsw_sp_port_vlan_bridge_join() */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1096 
/* Add VID to a bridge port member port: program VLAN membership and
 * PVID in hardware, then bind the {Port, VID} to the bridge port.
 * Error paths roll back in reverse order.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;	/* saved for rollback */
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	/* If the {Port, VID} is already bound (flags-only change), this
	 * drops the reference taken above and returns success.
	 */
	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
1134 
1135 static int
1136 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1137 				const struct net_device *br_dev,
1138 				const struct switchdev_obj_port_vlan *vlan)
1139 {
1140 	struct mlxsw_sp_rif *rif;
1141 	struct mlxsw_sp_fid *fid;
1142 	u16 pvid;
1143 	u16 vid;
1144 
1145 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1146 	if (!rif)
1147 		return 0;
1148 	fid = mlxsw_sp_rif_fid(rif);
1149 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1150 
1151 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1152 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1153 			if (vid != pvid) {
1154 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1155 				return -EBUSY;
1156 			}
1157 		} else {
1158 			if (vid == pvid) {
1159 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1160 				return -EBUSY;
1161 			}
1162 		}
1163 	}
1164 
1165 	return 0;
1166 }
1167 
/* switchdev handler for adding VLANs to a port. Entries targeting the
 * bridge master itself are not offloaded (-EOPNOTSUPP), but a PVID
 * change that would break a router interface is vetoed first in the
 * prepare phase.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		/* Even when the PVID check passes, the bridge-master
		 * entry itself is not offloaded.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	/* Hardware changes happen in the commit phase only */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLANs carry no hardware meaning in a VLAN-unaware bridge */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
1214 
1215 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1216 {
1217 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1218 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1219 }
1220 
1221 static int
1222 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1223 			       struct mlxsw_sp_bridge_port *bridge_port,
1224 			       u16 fid_index)
1225 {
1226 	bool lagged = bridge_port->lagged;
1227 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1228 	u16 system_port;
1229 
1230 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1231 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1232 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1233 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1234 
1235 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1236 }
1237 
1238 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1239 {
1240 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1241 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1242 }
1243 
1244 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1245 {
1246 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1247 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1248 }
1249 
/* Add/remove a unicast FDB record that points at a tunnel with the
 * given underlay address. Only IPv4 underlays are supported here;
 * IPv6 is rejected with -EOPNOTSUPP.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	/* Compare the record count before and after the write -
	 * presumably the device updates it to the number of records it
	 * actually consumed; a mismatch is reported as -EBUSY.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1294 
1295 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1296 				     const char *mac, u16 fid, bool adding,
1297 				     enum mlxsw_reg_sfd_rec_action action,
1298 				     bool dynamic)
1299 {
1300 	char *sfd_pl;
1301 	u8 num_rec;
1302 	int err;
1303 
1304 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1305 	if (!sfd_pl)
1306 		return -ENOMEM;
1307 
1308 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1309 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1310 			      mac, fid, action, local_port);
1311 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1312 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1313 	if (err)
1314 		goto out;
1315 
1316 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1317 		err = -EBUSY;
1318 
1319 out:
1320 	kfree(sfd_pl);
1321 	return err;
1322 }
1323 
/* Add/remove a plain unicast FDB record for a local port; thin wrapper
 * around __mlxsw_sp_port_fdb_uc_op() with the NOP (forward) action.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
1331 
/* Add/remove a static FDB record that directs packets to the router
 * (FORWARD_IP_ROUTER action). Local port 0 is passed - presumably a
 * don't-care with this action; confirm against the SFD register spec.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
1339 
1340 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1341 				       const char *mac, u16 fid, u16 lag_vid,
1342 				       bool adding, bool dynamic)
1343 {
1344 	char *sfd_pl;
1345 	u8 num_rec;
1346 	int err;
1347 
1348 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1349 	if (!sfd_pl)
1350 		return -ENOMEM;
1351 
1352 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1353 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1354 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1355 				  lag_vid, lag_id);
1356 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1357 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1358 	if (err)
1359 		goto out;
1360 
1361 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1362 		err = -EBUSY;
1363 
1364 out:
1365 	kfree(sfd_pl);
1366 	return err;
1367 }
1368 
1369 static int
1370 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1371 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1372 {
1373 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1374 	struct net_device *orig_dev = fdb_info->info.dev;
1375 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1376 	struct mlxsw_sp_bridge_device *bridge_device;
1377 	struct mlxsw_sp_bridge_port *bridge_port;
1378 	u16 fid_index, vid;
1379 
1380 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1381 	if (!bridge_port)
1382 		return -EINVAL;
1383 
1384 	bridge_device = bridge_port->bridge_device;
1385 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1386 							       bridge_device,
1387 							       fdb_info->vid);
1388 	if (!mlxsw_sp_port_vlan)
1389 		return 0;
1390 
1391 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1392 	vid = mlxsw_sp_port_vlan->vid;
1393 
1394 	if (!bridge_port->lagged)
1395 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1396 					       bridge_port->system_port,
1397 					       fdb_info->addr, fid_index,
1398 					       adding, false);
1399 	else
1400 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1401 						   bridge_port->lag_id,
1402 						   fdb_info->addr, fid_index,
1403 						   vid, adding, false);
1404 }
1405 
1406 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1407 				u16 fid, u16 mid_idx, bool adding)
1408 {
1409 	char *sfd_pl;
1410 	u8 num_rec;
1411 	int err;
1412 
1413 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1414 	if (!sfd_pl)
1415 		return -ENOMEM;
1416 
1417 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1418 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1419 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1420 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1421 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1422 	if (err)
1423 		goto out;
1424 
1425 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1426 		err = -EBUSY;
1427 
1428 out:
1429 	kfree(sfd_pl);
1430 	return err;
1431 }
1432 
/* Write the full egress port list of MID index mid_idx in one shot.
 * The SMID register carries a {port, mask} pair per port - the mask
 * bits appear to act as write-enables, so all present ports (and the
 * router port) are included in the mask and any port absent from
 * ports_bitmap is cleared from the MID.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	/* Enable writing for every present port (local ports start at 1) */
	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Member ports of the MID */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	/* The router port joins the MID only when requested (bridge
	 * device acting as mrouter).
	 */
	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1463 
1464 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1465 				  u16 mid_idx, bool add)
1466 {
1467 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1468 	char *smid_pl;
1469 	int err;
1470 
1471 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1472 	if (!smid_pl)
1473 		return -ENOMEM;
1474 
1475 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1476 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1477 	kfree(smid_pl);
1478 	return err;
1479 }
1480 
1481 static struct
1482 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1483 				const unsigned char *addr,
1484 				u16 fid)
1485 {
1486 	struct mlxsw_sp_mid *mid;
1487 
1488 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1489 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1490 			return mid;
1491 	}
1492 	return NULL;
1493 }
1494 
1495 static void
1496 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1497 				      struct mlxsw_sp_bridge_port *bridge_port,
1498 				      unsigned long *ports_bitmap)
1499 {
1500 	struct mlxsw_sp_port *mlxsw_sp_port;
1501 	u64 max_lag_members, i;
1502 	int lag_id;
1503 
1504 	if (!bridge_port->lagged) {
1505 		set_bit(bridge_port->system_port, ports_bitmap);
1506 	} else {
1507 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1508 						     MAX_LAG_MEMBERS);
1509 		lag_id = bridge_port->lag_id;
1510 		for (i = 0; i < max_lag_members; i++) {
1511 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1512 								 lag_id, i);
1513 			if (mlxsw_sp_port)
1514 				set_bit(mlxsw_sp_port->local_port,
1515 					ports_bitmap);
1516 		}
1517 	}
1518 }
1519 
1520 static void
1521 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1522 				struct mlxsw_sp_bridge_device *bridge_device,
1523 				struct mlxsw_sp *mlxsw_sp)
1524 {
1525 	struct mlxsw_sp_bridge_port *bridge_port;
1526 
1527 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1528 		if (bridge_port->mrouter) {
1529 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1530 							      bridge_port,
1531 							      flood_bitmap);
1532 		}
1533 	}
1534 }
1535 
/* Allocate a free MID index and commit the MDB entry to the device
 * (SMID egress list + SFD multicast record). The flood vector is the
 * entry's member ports plus all mrouter ports. Returns true on
 * success.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	/* Find a free MID index; all taken means the table is full */
	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	/* NOTE(review): if this SFD write fails, the SMID entry written
	 * above stays programmed for an unreserved MID index - confirm
	 * whether explicit cleanup is needed here.
	 */
	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	/* Reserve the index only once the device accepted the entry */
	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
1577 
1578 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1579 					struct mlxsw_sp_mid *mid)
1580 {
1581 	if (!mid->in_hw)
1582 		return 0;
1583 
1584 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1585 	mid->in_hw = false;
1586 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1587 				    false);
1588 }
1589 
1590 static struct
1591 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1592 				  struct mlxsw_sp_bridge_device *bridge_device,
1593 				  const unsigned char *addr,
1594 				  u16 fid)
1595 {
1596 	struct mlxsw_sp_mid *mid;
1597 	size_t alloc_size;
1598 
1599 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1600 	if (!mid)
1601 		return NULL;
1602 
1603 	alloc_size = sizeof(unsigned long) *
1604 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1605 
1606 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1607 	if (!mid->ports_in_mid)
1608 		goto err_ports_in_mid_alloc;
1609 
1610 	ether_addr_copy(mid->addr, addr);
1611 	mid->fid = fid;
1612 	mid->in_hw = false;
1613 
1614 	if (!bridge_device->multicast_enabled)
1615 		goto out;
1616 
1617 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1618 		goto err_write_mdb_entry;
1619 
1620 out:
1621 	list_add_tail(&mid->list, &bridge_device->mids_list);
1622 	return mid;
1623 
1624 err_write_mdb_entry:
1625 	kfree(mid->ports_in_mid);
1626 err_ports_in_mid_alloc:
1627 	kfree(mid);
1628 	return NULL;
1629 }
1630 
1631 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1632 					 struct mlxsw_sp_mid *mid)
1633 {
1634 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1635 	int err = 0;
1636 
1637 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1638 	if (bitmap_empty(mid->ports_in_mid,
1639 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1640 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1641 		list_del(&mid->list);
1642 		kfree(mid->ports_in_mid);
1643 		kfree(mid);
1644 	}
1645 	return err;
1646 }
1647 
/* switchdev handler for adding a port to an MDB group. Allocates the
 * group on first use and, when multicast snooping is active, adds the
 * port to the group's egress list in hardware.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	/* Hardware changes happen in the commit phase only */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	/* Ignore groups on VLANs this port is not a member of */
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* Without snooping the entry exists in software only */
	if (!bridge_device->multicast_enabled)
		return 0;

	/* mrouter ports are already in every group's flood vector */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
1707 
1708 static void
1709 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1710 				   struct mlxsw_sp_bridge_device
1711 				   *bridge_device)
1712 {
1713 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1714 	struct mlxsw_sp_mid *mid;
1715 	bool mc_enabled;
1716 
1717 	mc_enabled = bridge_device->multicast_enabled;
1718 
1719 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1720 		if (mc_enabled)
1721 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1722 						    bridge_device);
1723 		else
1724 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1725 	}
1726 }
1727 
1728 static void
1729 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1730 				 struct mlxsw_sp_bridge_port *bridge_port,
1731 				 bool add)
1732 {
1733 	struct mlxsw_sp_bridge_device *bridge_device;
1734 	struct mlxsw_sp_mid *mid;
1735 
1736 	bridge_device = bridge_port->bridge_device;
1737 
1738 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1739 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1740 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1741 	}
1742 }
1743 
/* Deferred work item used to respin SPAN (mirroring) state after a
 * bridge configuration change; carries the instance it applies to.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};
1748 
1749 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1750 {
1751 	struct mlxsw_sp_span_respin_work *respin_work =
1752 		container_of(work, struct mlxsw_sp_span_respin_work, work);
1753 
1754 	rtnl_lock();
1755 	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1756 	rtnl_unlock();
1757 	kfree(respin_work);
1758 }
1759 
1760 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1761 {
1762 	struct mlxsw_sp_span_respin_work *respin_work;
1763 
1764 	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1765 	if (!respin_work)
1766 		return;
1767 
1768 	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1769 	respin_work->mlxsw_sp = mlxsw_sp;
1770 
1771 	mlxsw_core_schedule_work(&respin_work->work);
1772 }
1773 
/* switchdev object-add handler for mlxsw_sp ports (VLANs and MDB
 * groups); all other object types are not offloaded.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
1808 
1809 static void
1810 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1811 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1812 {
1813 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
1814 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1815 
1816 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1817 	if (WARN_ON(!mlxsw_sp_port_vlan))
1818 		return;
1819 
1820 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1821 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1822 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1823 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1824 }
1825 
/* switchdev handler for deleting VLANs from a port. Bridge-master
 * entries are not offloaded, and VLANs are meaningless in a
 * VLAN-unaware bridge.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
1849 
/* Remove the port from an MDB group: detach it from the group's egress
 * list in hardware (unless it stays there as an mrouter port or the
 * bridge has snooping disabled) and drop its software membership,
 * freeing the group if it was the last member.
 */
static int
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		/* SMID failure is only logged; software state is still
		 * cleaned up below.
		 */
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		if (err)
			netdev_err(dev, "Unable to remove port from SMID\n");
	}

	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	if (err)
		netdev_err(dev, "Unable to remove MC SFD\n");

	return err;
}
1871 
/* switchdev handler for removing a port from an MDB group. */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	/* Ignore groups on VLANs this port is not a member of */
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
1905 
/* Remove the port from all of the bridge's MDB groups, e.g. when it
 * leaves its last VLAN on the bridge port. Groups the port never
 * joined explicitly but egresses as an mrouter port are detached from
 * it as well.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	/* _safe iteration - __mlxsw_sp_port_mdb_del() may free the MID */
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}
1925 
/* switchdev object-delete handler for mlxsw_sp ports. A SPAN respin is
 * scheduled unconditionally since deletions have no prepare phase.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1950 
1951 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1952 						   u16 lag_id)
1953 {
1954 	struct mlxsw_sp_port *mlxsw_sp_port;
1955 	u64 max_lag_members;
1956 	int i;
1957 
1958 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1959 					     MAX_LAG_MEMBERS);
1960 	for (i = 0; i < max_lag_members; i++) {
1961 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1962 		if (mlxsw_sp_port)
1963 			return mlxsw_sp_port;
1964 	}
1965 	return NULL;
1966 }
1967 
/* switchdev ops registered on every mlxsw_sp port netdev */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
};
1972 
/* Join a port to a VLAN-aware (802.1Q) bridge. VLAN uppers cannot be
 * enslaved to such a bridge; otherwise only the default {Port, VID=1}
 * reference is released, as the bridge manages VLANs via switchdev.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* The default {Port, VID=1} is presumably created at port init
	 * and must exist here - TODO confirm against port init path.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1995 
/* Leave a VLAN-aware (802.1Q) bridge: restore the default
 * {Port, VID=1} dropped on join and reset the PVID.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Re-take the reference released in the 802.1Q join op */
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
2005 
/* VxLAN join is not supported for a VLAN-aware (802.1Q) bridge; this
 * op is not expected to be reached, hence the WARN_ON.
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev,
				 struct netlink_ext_ack *extack)
{
	WARN_ON(1);
	return -EINVAL;
}
2014 
/* No VxLAN state exists for a 802.1Q bridge, so nothing to tear down */
static void
mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev)
{
}
2020 
/* In a VLAN-aware bridge, each VLAN is backed by its own 802.1Q FID */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2029 
/* FID lookup is only exercised on the VxLAN offload path, which is
 * unsupported for VLAN-aware bridges; this should never be reached.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	WARN_ON(1);
	return NULL;
}
2037 
/* The VID reported to the bridge driver is the VLAN backing the FID */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2044 
/* Operations for VLAN-aware (802.1Q) bridges. The vxlan_join and
 * fid_lookup callbacks are WARN_ON() stubs, as VxLAN offload is not
 * supported for this bridge type here.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.vxlan_leave	= mlxsw_sp_bridge_8021q_vxlan_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2054 
2055 static bool
2056 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2057 			   const struct net_device *br_dev)
2058 {
2059 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2060 
2061 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2062 			    list) {
2063 		if (mlxsw_sp_port_vlan->bridge_port &&
2064 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2065 		    br_dev)
2066 			return true;
2067 	}
2068 
2069 	return false;
2070 }
2071 
2072 static int
2073 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2074 				struct mlxsw_sp_bridge_port *bridge_port,
2075 				struct mlxsw_sp_port *mlxsw_sp_port,
2076 				struct netlink_ext_ack *extack)
2077 {
2078 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2079 	struct net_device *dev = bridge_port->dev;
2080 	u16 vid;
2081 
2082 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2083 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2084 	if (WARN_ON(!mlxsw_sp_port_vlan))
2085 		return -EINVAL;
2086 
2087 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2088 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2089 		return -EINVAL;
2090 	}
2091 
2092 	/* Port is no longer usable as a router interface */
2093 	if (mlxsw_sp_port_vlan->fid)
2094 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2095 
2096 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
2097 }
2098 
2099 static void
2100 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2101 				 struct mlxsw_sp_bridge_port *bridge_port,
2102 				 struct mlxsw_sp_port *mlxsw_sp_port)
2103 {
2104 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2105 	struct net_device *dev = bridge_port->dev;
2106 	u16 vid;
2107 
2108 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2109 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2110 	if (WARN_ON(!mlxsw_sp_port_vlan))
2111 		return;
2112 
2113 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2114 }
2115 
2116 static int
2117 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2118 				 const struct net_device *vxlan_dev,
2119 				 struct netlink_ext_ack *extack)
2120 {
2121 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2122 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2123 	struct mlxsw_sp_nve_params params = {
2124 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2125 		.vni = vxlan->cfg.vni,
2126 		.dev = vxlan_dev,
2127 	};
2128 	struct mlxsw_sp_fid *fid;
2129 	int err;
2130 
2131 	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2132 	if (!fid)
2133 		return -EINVAL;
2134 
2135 	if (mlxsw_sp_fid_vni_is_set(fid))
2136 		return -EINVAL;
2137 
2138 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2139 	if (err)
2140 		goto err_nve_fid_enable;
2141 
2142 	/* The tunnel port does not hold a reference on the FID. Only
2143 	 * local ports and the router port
2144 	 */
2145 	mlxsw_sp_fid_put(fid);
2146 
2147 	return 0;
2148 
2149 err_nve_fid_enable:
2150 	mlxsw_sp_fid_put(fid);
2151 	return err;
2152 }
2153 
2154 static void
2155 mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
2156 				  const struct net_device *vxlan_dev)
2157 {
2158 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2159 	struct mlxsw_sp_fid *fid;
2160 
2161 	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2162 	if (WARN_ON(!fid))
2163 		return;
2164 
2165 	/* If the VxLAN device is down, then the FID does not have a VNI */
2166 	if (!mlxsw_sp_fid_vni_is_set(fid))
2167 		goto out;
2168 
2169 	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2170 out:
2171 	mlxsw_sp_fid_put(fid);
2172 }
2173 
2174 static struct mlxsw_sp_fid *
2175 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2176 			      u16 vid)
2177 {
2178 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2179 	struct net_device *vxlan_dev;
2180 	struct mlxsw_sp_fid *fid;
2181 	int err;
2182 
2183 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2184 	if (IS_ERR(fid))
2185 		return fid;
2186 
2187 	if (mlxsw_sp_fid_vni_is_set(fid))
2188 		return fid;
2189 
2190 	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
2191 	if (!vxlan_dev)
2192 		return fid;
2193 
2194 	if (!netif_running(vxlan_dev))
2195 		return fid;
2196 
2197 	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
2198 	if (err)
2199 		goto err_vxlan_join;
2200 
2201 	return fid;
2202 
2203 err_vxlan_join:
2204 	mlxsw_sp_fid_put(fid);
2205 	return ERR_PTR(err);
2206 }
2207 
2208 static struct mlxsw_sp_fid *
2209 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2210 				 u16 vid)
2211 {
2212 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2213 
2214 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2215 	if (vid)
2216 		return NULL;
2217 
2218 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2219 }
2220 
/* A VLAN-unaware bridge is always notified with VID 0 */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2227 
/* Operations for VLAN-unaware (802.1D) bridges, including VxLAN
 * offload support.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.vxlan_leave	= mlxsw_sp_bridge_8021d_vxlan_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2237 
2238 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2239 			      struct net_device *brport_dev,
2240 			      struct net_device *br_dev,
2241 			      struct netlink_ext_ack *extack)
2242 {
2243 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2244 	struct mlxsw_sp_bridge_device *bridge_device;
2245 	struct mlxsw_sp_bridge_port *bridge_port;
2246 	int err;
2247 
2248 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2249 	if (IS_ERR(bridge_port))
2250 		return PTR_ERR(bridge_port);
2251 	bridge_device = bridge_port->bridge_device;
2252 
2253 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2254 					    mlxsw_sp_port, extack);
2255 	if (err)
2256 		goto err_port_join;
2257 
2258 	return 0;
2259 
2260 err_port_join:
2261 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2262 	return err;
2263 }
2264 
2265 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2266 				struct net_device *brport_dev,
2267 				struct net_device *br_dev)
2268 {
2269 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2270 	struct mlxsw_sp_bridge_device *bridge_device;
2271 	struct mlxsw_sp_bridge_port *bridge_port;
2272 
2273 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2274 	if (!bridge_device)
2275 		return;
2276 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2277 	if (!bridge_port)
2278 		return;
2279 
2280 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2281 				       mlxsw_sp_port);
2282 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2283 }
2284 
/* Offload a VxLAN device enslaved to 'br_dev'; dispatches to the
 * bridge-type specific handler.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
}
2298 
/* Stop offloading a VxLAN device enslaved to 'br_dev'; dispatches to
 * the bridge-type specific handler.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *br_dev,
				 const struct net_device *vxlan_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return;

	bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
}
2311 
2312 static void
2313 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2314 				      enum mlxsw_sp_l3proto *proto,
2315 				      union mlxsw_sp_l3addr *addr)
2316 {
2317 	if (vxlan_addr->sa.sa_family == AF_INET) {
2318 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2319 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2320 	} else {
2321 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2322 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2323 	}
2324 }
2325 
2326 static void
2327 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2328 				      const union mlxsw_sp_l3addr *addr,
2329 				      union vxlan_addr *vxlan_addr)
2330 {
2331 	switch (proto) {
2332 	case MLXSW_SP_L3_PROTO_IPV4:
2333 		vxlan_addr->sa.sa_family = AF_INET;
2334 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2335 		break;
2336 	case MLXSW_SP_L3_PROTO_IPV6:
2337 		vxlan_addr->sa.sa_family = AF_INET6;
2338 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2339 		break;
2340 	}
2341 }
2342 
/* Notify the VxLAN driver about a tunnel FDB entry learned or aged-out
 * by the device, so its software FDB stays in sync. 'offloaded' mirrors
 * 'adding' since the device itself generated the event.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info);
}
2364 
2365 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2366 					    const char *mac,
2367 					    enum mlxsw_sp_l3proto proto,
2368 					    union mlxsw_sp_l3addr *addr,
2369 					    __be32 vni,
2370 					    bool adding)
2371 {
2372 	if (netif_is_vxlan(dev))
2373 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2374 						  adding);
2375 }
2376 
2377 static void
2378 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2379 			    const char *mac, u16 vid,
2380 			    struct net_device *dev, bool offloaded)
2381 {
2382 	struct switchdev_notifier_fdb_info info;
2383 
2384 	info.addr = mac;
2385 	info.vid = vid;
2386 	info.offloaded = offloaded;
2387 	call_switchdev_notifiers(type, dev, &info.info);
2388 }
2389 
/* Process a single learned/aged-out MAC record from the SFN register:
 * (re-)program the entry in the device and notify the bridge driver so
 * its FDB stays in sync. If the record cannot be associated with a
 * bridged {Port, VID}, the entry is removed from the device without
 * notifying anyone, otherwise the device keeps re-sending the same
 * notification.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge is notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Delete the entry from the device, skip the bridge notification */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2447 
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the SFN record
 * points at a LAG rather than a local port, so a port representor of
 * the LAG is used for the lookups and the entry is programmed against
 * the LAG ID.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge is notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	/* The LAG VID is only used for FIDs whose entries carry a VID */
	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
		  mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Delete the entry from the device, skip the bridge notification */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2508 
/* Resolve the NVE device, bridge VID and VNI for a learned/aged-out
 * tunnel FDB record and validate that the record may be processed.
 * Returns 0 on success or a negative errno otherwise. '*nve_dev' is
 * set as soon as the device is found, even if a later check fails.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(&init_net, nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not learn when learning is disabled on the bridge port */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* ...or when learning is disabled on the VxLAN device itself */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
2559 
2560 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2561 						      char *sfn_pl,
2562 						      int rec_index,
2563 						      bool adding)
2564 {
2565 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2566 	enum switchdev_notifier_type type;
2567 	struct net_device *nve_dev;
2568 	union mlxsw_sp_l3addr addr;
2569 	struct mlxsw_sp_fid *fid;
2570 	char mac[ETH_ALEN];
2571 	u16 fid_index, vid;
2572 	__be32 vni;
2573 	u32 uip;
2574 	int err;
2575 
2576 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2577 				       &uip, &sfn_proto);
2578 
2579 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2580 	if (!fid)
2581 		goto err_fid_lookup;
2582 
2583 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2584 					      (enum mlxsw_sp_l3proto) sfn_proto,
2585 					      &addr);
2586 	if (err)
2587 		goto err_ip_resolve;
2588 
2589 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2590 							  &nve_dev, &vid, &vni);
2591 	if (err)
2592 		goto err_fdb_process;
2593 
2594 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2595 					     (enum mlxsw_sp_l3proto) sfn_proto,
2596 					     &addr, adding, true);
2597 	if (err)
2598 		goto err_fdb_op;
2599 
2600 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2601 					(enum mlxsw_sp_l3proto) sfn_proto,
2602 					&addr, vni, adding);
2603 
2604 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2605 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2606 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2607 
2608 	mlxsw_sp_fid_put(fid);
2609 
2610 	return;
2611 
2612 err_fdb_op:
2613 err_fdb_process:
2614 err_ip_resolve:
2615 	mlxsw_sp_fid_put(fid);
2616 err_fid_lookup:
2617 	/* Remove an FDB entry in case we cannot process it. Otherwise the
2618 	 * device will keep sending the same notification over and over again.
2619 	 */
2620 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2621 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2622 				       false, true);
2623 }
2624 
/* Dispatch a single SFN record to its handler. Learned records are
 * added, aged-out records are removed. Unknown record types are
 * silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, false);
		break;
	}
}
2655 
2656 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2657 {
2658 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2659 
2660 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2661 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2662 }
2663 
/* Delayed work that polls the device for FDB notifications (learned
 * and aged-out entries), processes each record and re-arms itself.
 * NOTE(review): if the kmalloc() below fails, the work returns without
 * rescheduling and polling stops permanently — confirm this is the
 * intended OOM behavior.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	/* RTNL serializes record processing against reconfiguration */
	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
2696 
/* Deferred context for a switchdev FDB event. The notifier itself runs
 * under rcu_read_lock(), so the heavy lifting is deferred to this work
 * item in process context.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		/* Which member is valid depends on 'event' */
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;	/* Released with dev_put() by the work */
	unsigned long event;
};
2706 
/* Handle a bridge FDB add/del that points at a VxLAN device: mirror the
 * VxLAN driver's matching unicast entry into the device and report the
 * offload state back to both the VxLAN and bridge drivers.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	/* Nothing to do if the VxLAN driver has no matching unicast entry */
	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Best-effort removal; the error is ignored */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		break;
	}
}
2755 
/* Validate a bridge FDB event targeting an NVE (VxLAN) device and, if
 * the device's bridge is offloaded and its FID has a VNI, forward the
 * event to mlxsw_sp_switchdev_bridge_vxlan_fdb_event().
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only user-added entries are offloaded on the add path */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    !switchdev_work->fdb_info.added_by_user)
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
2805 
/* Work item handling bridge FDB events in process context. VxLAN
 * targets are routed to the NVE handler; events on (lowers of) mlxsw
 * ports program/remove the FDB entry in the device. Frees the work
 * item and drops the device reference taken when it was queued.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only offload entries added by the user */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2857 
/* Handle a VxLAN FDB add event: an all-zeros MAC denotes a flood entry
 * and is programmed as a flood IP; a unicast MAC is programmed only if
 * the bridge's FDB also points that MAC at the VxLAN device. In both
 * cases the offload state is reported back via notifiers.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: this is a flood entry, not a unicast one */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
2929 
/* Handle a VxLAN FDB del event: remove the matching flood IP (all-zeros
 * MAC) or unicast tunnel entry from the device and notify the bridge
 * that the entry is no longer offloaded.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: this is a flood entry, not a unicast one */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false);

	mlxsw_sp_fid_put(fid);
}
2973 
/* Work item handling VxLAN FDB events in process context. The event is
 * only processed if the VxLAN device is still running and enslaved to
 * an offloaded bridge. Frees the work item and drops the device
 * reference taken when it was queued.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work);
	dev_put(dev);
}
3009 
3010 static int
3011 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3012 				      switchdev_work,
3013 				      struct switchdev_notifier_info *info)
3014 {
3015 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3016 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3017 	struct vxlan_config *cfg = &vxlan->cfg;
3018 
3019 	vxlan_fdb_info = container_of(info,
3020 				      struct switchdev_notifier_vxlan_fdb_info,
3021 				      info);
3022 
3023 	if (vxlan_fdb_info->remote_port != cfg->dst_port)
3024 		return -EOPNOTSUPP;
3025 	if (vxlan_fdb_info->remote_vni != cfg->vni)
3026 		return -EOPNOTSUPP;
3027 	if (vxlan_fdb_info->vni != cfg->vni)
3028 		return -EOPNOTSUPP;
3029 	if (vxlan_fdb_info->remote_ifindex)
3030 		return -EOPNOTSUPP;
3031 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
3032 		return -EOPNOTSUPP;
3033 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
3034 		return -EOPNOTSUPP;
3035 
3036 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3037 
3038 	return 0;
3039 }
3040 
/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic (RCU) context: defer the actual FDB programming to a work
	 * item; GFP_ATOMIC because we must not sleep here.
	 */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* fdb_info.addr still points into notifier-owned memory after
		 * the memcpy above; deep-copy the MAC so it remains valid once
		 * this handler returns and the work item runs.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		/* Validates the entry and copies it into switchdev_work;
		 * unsupported entries are rejected here, before scheduling.
		 */
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3115 
/* Atomic switchdev notifier; registered in mlxsw_sp_fdb_init(). */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3119 
3120 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3121 					     unsigned long event, void *ptr)
3122 {
3123 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3124 	int err;
3125 
3126 	switch (event) {
3127 	case SWITCHDEV_PORT_OBJ_ADD:
3128 		err = switchdev_handle_port_obj_add(dev, ptr,
3129 						    mlxsw_sp_port_dev_check,
3130 						    mlxsw_sp_port_obj_add);
3131 		return notifier_from_errno(err);
3132 	case SWITCHDEV_PORT_OBJ_DEL:
3133 		err = switchdev_handle_port_obj_del(dev, ptr,
3134 						    mlxsw_sp_port_dev_check,
3135 						    mlxsw_sp_port_obj_del);
3136 		return notifier_from_errno(err);
3137 	}
3138 
3139 	return NOTIFY_DONE;
3140 }
3141 
/* Blocking switchdev notifier; registered in mlxsw_sp_fdb_init(). */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
3145 
/* mlxsw_sp_bridge_port_stp_state - Return the cached STP state of a
 * bridge port. Accessor for other translation units; non-static.
 */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
3151 
3152 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3153 {
3154 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3155 	struct notifier_block *nb;
3156 	int err;
3157 
3158 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3159 	if (err) {
3160 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3161 		return err;
3162 	}
3163 
3164 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3165 	if (err) {
3166 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3167 		return err;
3168 	}
3169 
3170 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3171 	err = register_switchdev_blocking_notifier(nb);
3172 	if (err) {
3173 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3174 		goto err_register_switchdev_blocking_notifier;
3175 	}
3176 
3177 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3178 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3179 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
3180 	return 0;
3181 
3182 err_register_switchdev_blocking_notifier:
3183 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3184 	return err;
3185 }
3186 
3187 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3188 {
3189 	struct notifier_block *nb;
3190 
3191 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3192 
3193 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3194 	unregister_switchdev_blocking_notifier(nb);
3195 
3196 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3197 }
3198 
3199 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3200 {
3201 	struct mlxsw_sp_bridge *bridge;
3202 
3203 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3204 	if (!bridge)
3205 		return -ENOMEM;
3206 	mlxsw_sp->bridge = bridge;
3207 	bridge->mlxsw_sp = mlxsw_sp;
3208 
3209 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3210 
3211 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3212 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3213 
3214 	return mlxsw_sp_fdb_init(mlxsw_sp);
3215 }
3216 
3217 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3218 {
3219 	mlxsw_sp_fdb_fini(mlxsw_sp);
3220 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3221 	kfree(mlxsw_sp->bridge);
3222 }
3223 
/* mlxsw_sp_port_switchdev_init - Attach the switchdev ops to a port's
 * net_device so the switchdev core can invoke the driver's handlers.
 */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
3228 
/* mlxsw_sp_port_switchdev_fini - Per-port switchdev teardown.
 * Intentionally empty: nothing per-port needs undoing here (presumably
 * kept as a symmetric counterpart to mlxsw_sp_port_switchdev_init()).
 */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
3232