xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision fed8b7e366e7c8f81e957ef91aa8f0a38e038c66)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
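/* Per-ASIC bridge state: the delayed work and polling interval (in ms)
 * used to process FDB notifications from the device, the ageing time
 * programmed through the SFDAT register, a bitmap of allocated
 * multicast group (MID) indexes and the list of offloaded bridge
 * devices. Only a single VLAN-aware (802.1Q) bridge is supported at
 * any given time, tracked by 'vlan_enabled_exists'.
 */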
28 struct mlxsw_sp_bridge {
29 	struct mlxsw_sp *mlxsw_sp;
30 	struct {
31 		struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 		unsigned int interval; /* ms */
34 	} fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 	u32 ageing_time;
39 	bool vlan_enabled_exists;
40 	struct list_head bridges_list;
41 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 };
45 
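/* Represents a single bridge netdevice that mlxsw ports (or their
 * uppers) are enslaved to. 'ops' points to either the 802.1Q or the
 * 802.1D operations, depending on whether the bridge was created with
 * VLAN filtering enabled.
 */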
46 struct mlxsw_sp_bridge_device {
47 	struct net_device *dev;
48 	struct list_head list;
49 	struct list_head ports_list;
50 	struct list_head mids_list;
51 	u8 vlan_enabled:1,
52 	   multicast_enabled:1,
53 	   mrouter:1;
54 	const struct mlxsw_sp_bridge_ops *ops;
55 };
56 
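/* Represents a netdevice enslaved to an offloaded bridge: a physical
 * port, a LAG device or a VLAN upper of one of these. The structure is
 * reference counted, since the same bridge port may be used by several
 * port VLANs (e.g. multiple VIDs of a VLAN-aware bridge, or multiple
 * members of a LAG).
 */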
57 struct mlxsw_sp_bridge_port {
58 	struct net_device *dev;
59 	struct mlxsw_sp_bridge_device *bridge_device;
60 	struct list_head list;
61 	struct list_head vlans_list;
62 	unsigned int ref_count;
63 	u8 stp_state;
64 	unsigned long flags;
65 	bool mrouter;
66 	bool lagged;
67 	union {
68 		u16 lag_id;
69 		u16 system_port;
70 	};
71 };
72 
73 struct mlxsw_sp_bridge_vlan {
74 	struct list_head list;
75 	struct list_head port_vlan_list;
76 	u16 vid;
77 };
78 
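/* Operations that differ between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges; mainly how ports join and leave the bridge and how
 * VIDs are mapped to FIDs (filtering identifiers) in the device.
 */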
79 struct mlxsw_sp_bridge_ops {
80 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 			 struct mlxsw_sp_bridge_port *bridge_port,
82 			 struct mlxsw_sp_port *mlxsw_sp_port,
83 			 struct netlink_ext_ack *extack);
84 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 			   struct mlxsw_sp_bridge_port *bridge_port,
86 			   struct mlxsw_sp_port *mlxsw_sp_port);
87 	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
88 			  const struct net_device *vxlan_dev,
89 			  struct netlink_ext_ack *extack);
90 	void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
91 			    const struct net_device *vxlan_dev);
92 	struct mlxsw_sp_fid *
93 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
94 			   u16 vid);
95 	struct mlxsw_sp_fid *
96 		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
97 			      u16 vid);
98 	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
99 		       const struct mlxsw_sp_fid *fid);
100 };
101 
102 static int
103 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
104 			       struct mlxsw_sp_bridge_port *bridge_port,
105 			       u16 fid_index);
106 
107 static void
108 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
109 			       struct mlxsw_sp_bridge_port *bridge_port);
110 
111 static void
112 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
113 				   struct mlxsw_sp_bridge_device
114 				   *bridge_device);
115 
116 static void
117 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
118 				 struct mlxsw_sp_bridge_port *bridge_port,
119 				 bool add);
120 
121 static struct mlxsw_sp_bridge_device *
122 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
123 			    const struct net_device *br_dev)
124 {
125 	struct mlxsw_sp_bridge_device *bridge_device;
126 
127 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
128 		if (bridge_device->dev == br_dev)
129 			return bridge_device;
130 
131 	return NULL;
132 }
133 
134 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
135 					 const struct net_device *br_dev)
136 {
137 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
138 }
139 
140 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
141 						    void *data)
142 {
143 	struct mlxsw_sp *mlxsw_sp = data;
144 
145 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
146 	return 0;
147 }
148 
149 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
150 						struct net_device *dev)
151 {
152 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
153 	netdev_walk_all_upper_dev_rcu(dev,
154 				      mlxsw_sp_bridge_device_upper_rif_destroy,
155 				      mlxsw_sp);
156 }
157 
158 static struct mlxsw_sp_bridge_device *
159 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
160 			      struct net_device *br_dev)
161 {
162 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
163 	struct mlxsw_sp_bridge_device *bridge_device;
164 	bool vlan_enabled = br_vlan_enabled(br_dev);
165 
166 	if (vlan_enabled && bridge->vlan_enabled_exists) {
167 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
168 		return ERR_PTR(-EINVAL);
169 	}
170 
171 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
172 	if (!bridge_device)
173 		return ERR_PTR(-ENOMEM);
174 
175 	bridge_device->dev = br_dev;
176 	bridge_device->vlan_enabled = vlan_enabled;
177 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
178 	bridge_device->mrouter = br_multicast_router(br_dev);
179 	INIT_LIST_HEAD(&bridge_device->ports_list);
180 	if (vlan_enabled) {
181 		bridge->vlan_enabled_exists = true;
182 		bridge_device->ops = bridge->bridge_8021q_ops;
183 	} else {
184 		bridge_device->ops = bridge->bridge_8021d_ops;
185 	}
186 	INIT_LIST_HEAD(&bridge_device->mids_list);
187 	list_add(&bridge_device->list, &bridge->bridges_list);
188 
189 	return bridge_device;
190 }
191 
192 static void
193 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
194 			       struct mlxsw_sp_bridge_device *bridge_device)
195 {
196 	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
197 					    bridge_device->dev);
198 	list_del(&bridge_device->list);
199 	if (bridge_device->vlan_enabled)
200 		bridge->vlan_enabled_exists = false;
201 	WARN_ON(!list_empty(&bridge_device->ports_list));
202 	WARN_ON(!list_empty(&bridge_device->mids_list));
203 	kfree(bridge_device);
204 }
205 
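/* Lookup-or-create helper: a bridge device is instantiated when the
 * first port is enslaved to it and destroyed by
 * mlxsw_sp_bridge_device_put() once its ports list becomes empty.
 */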
206 static struct mlxsw_sp_bridge_device *
207 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
208 			   struct net_device *br_dev)
209 {
210 	struct mlxsw_sp_bridge_device *bridge_device;
211 
212 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
213 	if (bridge_device)
214 		return bridge_device;
215 
216 	return mlxsw_sp_bridge_device_create(bridge, br_dev);
217 }
218 
219 static void
220 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
221 			   struct mlxsw_sp_bridge_device *bridge_device)
222 {
223 	if (list_empty(&bridge_device->ports_list))
224 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
225 }
226 
227 static struct mlxsw_sp_bridge_port *
228 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
229 			    const struct net_device *brport_dev)
230 {
231 	struct mlxsw_sp_bridge_port *bridge_port;
232 
233 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
234 		if (bridge_port->dev == brport_dev)
235 			return bridge_port;
236 	}
237 
238 	return NULL;
239 }
240 
241 struct mlxsw_sp_bridge_port *
242 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
243 			  struct net_device *brport_dev)
244 {
245 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
246 	struct mlxsw_sp_bridge_device *bridge_device;
247 
248 	if (!br_dev)
249 		return NULL;
250 
251 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
252 	if (!bridge_device)
253 		return NULL;
254 
255 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
256 }
257 
258 static struct mlxsw_sp_bridge_port *
259 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
260 			    struct net_device *brport_dev)
261 {
262 	struct mlxsw_sp_bridge_port *bridge_port;
263 	struct mlxsw_sp_port *mlxsw_sp_port;
264 
265 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
266 	if (!bridge_port)
267 		return NULL;
268 
269 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
270 	bridge_port->lagged = mlxsw_sp_port->lagged;
271 	if (bridge_port->lagged)
272 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
273 	else
274 		bridge_port->system_port = mlxsw_sp_port->local_port;
275 	bridge_port->dev = brport_dev;
276 	bridge_port->bridge_device = bridge_device;
277 	bridge_port->stp_state = BR_STATE_DISABLED;
278 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
279 			     BR_MCAST_FLOOD;
280 	INIT_LIST_HEAD(&bridge_port->vlans_list);
281 	list_add(&bridge_port->list, &bridge_device->ports_list);
282 	bridge_port->ref_count = 1;
283 
284 	return bridge_port;
285 }
286 
287 static void
288 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
289 {
290 	list_del(&bridge_port->list);
291 	WARN_ON(!list_empty(&bridge_port->vlans_list));
292 	kfree(bridge_port);
293 }
294 
295 static bool
296 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
297 				    bridge_port)
298 {
299 	struct net_device *dev = bridge_port->dev;
300 	struct mlxsw_sp *mlxsw_sp;
301 
302 	if (is_vlan_dev(dev))
303 		mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
304 	else
305 		mlxsw_sp = mlxsw_sp_lower_get(dev);
306 
307 	/* In case ports were pulled out of a bridged LAG, it's possible
308 	 * the reference count isn't zero, yet the bridge port should be
309 	 * destroyed, as it's no longer an upper of ours.
310 	 */
311 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
312 		return true;
313 	else if (bridge_port->ref_count == 0)
314 		return true;
315 	else
316 		return false;
317 }
318 
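/* Find the bridge port corresponding to 'brport_dev' and take a
 * reference on it, creating both the bridge port and - if needed - the
 * bridge device on first use.
 */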
319 static struct mlxsw_sp_bridge_port *
320 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
321 			 struct net_device *brport_dev)
322 {
323 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
324 	struct mlxsw_sp_bridge_device *bridge_device;
325 	struct mlxsw_sp_bridge_port *bridge_port;
326 	int err;
327 
328 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
329 	if (bridge_port) {
330 		bridge_port->ref_count++;
331 		return bridge_port;
332 	}
333 
334 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
335 	if (IS_ERR(bridge_device))
336 		return ERR_CAST(bridge_device);
337 
338 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
339 	if (!bridge_port) {
340 		err = -ENOMEM;
341 		goto err_bridge_port_create;
342 	}
343 
344 	return bridge_port;
345 
346 err_bridge_port_create:
347 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
348 	return ERR_PTR(err);
349 }
350 
351 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
352 				     struct mlxsw_sp_bridge_port *bridge_port)
353 {
354 	struct mlxsw_sp_bridge_device *bridge_device;
355 
356 	bridge_port->ref_count--;
357 	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
358 		return;
359 	bridge_device = bridge_port->bridge_device;
360 	mlxsw_sp_bridge_port_destroy(bridge_port);
361 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
362 }
363 
364 static struct mlxsw_sp_port_vlan *
365 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
366 				  const struct mlxsw_sp_bridge_device *
367 				  bridge_device,
368 				  u16 vid)
369 {
370 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
371 
372 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
373 			    list) {
374 		if (!mlxsw_sp_port_vlan->bridge_port)
375 			continue;
376 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
377 		    bridge_device)
378 			continue;
379 		if (bridge_device->vlan_enabled &&
380 		    mlxsw_sp_port_vlan->vid != vid)
381 			continue;
382 		return mlxsw_sp_port_vlan;
383 	}
384 
385 	return NULL;
386 }
387 
388 static struct mlxsw_sp_port_vlan *
389 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
390 			       u16 fid_index)
391 {
392 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
393 
394 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
395 			    list) {
396 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
397 
398 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
399 			return mlxsw_sp_port_vlan;
400 	}
401 
402 	return NULL;
403 }
404 
405 static struct mlxsw_sp_bridge_vlan *
406 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
407 			  u16 vid)
408 {
409 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
410 
411 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
412 		if (bridge_vlan->vid == vid)
413 			return bridge_vlan;
414 	}
415 
416 	return NULL;
417 }
418 
419 static struct mlxsw_sp_bridge_vlan *
420 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
421 {
422 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
423 
424 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
425 	if (!bridge_vlan)
426 		return NULL;
427 
428 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
429 	bridge_vlan->vid = vid;
430 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
431 
432 	return bridge_vlan;
433 }
434 
435 static void
436 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
437 {
438 	list_del(&bridge_vlan->list);
439 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
440 	kfree(bridge_vlan);
441 }
442 
443 static struct mlxsw_sp_bridge_vlan *
444 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
445 {
446 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
447 
448 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
449 	if (bridge_vlan)
450 		return bridge_vlan;
451 
452 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
453 }
454 
455 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
456 {
457 	if (list_empty(&bridge_vlan->port_vlan_list))
458 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
459 }
460 
461 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
462 					   struct net_device *dev,
463 					   unsigned long *brport_flags)
464 {
465 	struct mlxsw_sp_bridge_port *bridge_port;
466 
467 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
468 	if (WARN_ON(!bridge_port))
469 		return;
470 
471 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
472 }
473 
474 static int mlxsw_sp_port_attr_get(struct net_device *dev,
475 				  struct switchdev_attr *attr)
476 {
477 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
478 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
479 
480 	switch (attr->id) {
481 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
482 		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
483 		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
484 		       attr->u.ppid.id_len);
485 		break;
486 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
487 		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
488 					       &attr->u.brport_flags);
489 		break;
490 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
491 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
492 					       BR_MCAST_FLOOD;
493 		break;
494 	default:
495 		return -EOPNOTSUPP;
496 	}
497 
498 	return 0;
499 }
500 
501 static int
502 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
503 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
504 				  u8 state)
505 {
506 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
507 
508 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
509 			    bridge_vlan_node) {
510 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
511 			continue;
512 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
513 						 bridge_vlan->vid, state);
514 	}
515 
516 	return 0;
517 }
518 
519 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
520 					    struct switchdev_trans *trans,
521 					    struct net_device *orig_dev,
522 					    u8 state)
523 {
524 	struct mlxsw_sp_bridge_port *bridge_port;
525 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
526 	int err;
527 
528 	if (switchdev_trans_ph_prepare(trans))
529 		return 0;
530 
531 	/* It's possible we failed to enslave the port, yet this
532 	 * operation is still executed because it was deferred.
533 	 */
534 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
535 						orig_dev);
536 	if (!bridge_port)
537 		return 0;
538 
539 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
540 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
541 							bridge_vlan, state);
542 		if (err)
543 			goto err_port_bridge_vlan_stp_set;
544 	}
545 
546 	bridge_port->stp_state = state;
547 
548 	return 0;
549 
550 err_port_bridge_vlan_stp_set:
551 	list_for_each_entry_continue_reverse(bridge_vlan,
552 					     &bridge_port->vlans_list, list)
553 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
554 						  bridge_port->stp_state);
555 	return err;
556 }
557 
558 static int
559 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
560 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
561 				    enum mlxsw_sp_flood_type packet_type,
562 				    bool member)
563 {
564 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
565 
566 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
567 			    bridge_vlan_node) {
568 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
569 			continue;
570 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
571 					      packet_type,
572 					      mlxsw_sp_port->local_port,
573 					      member);
574 	}
575 
576 	return 0;
577 }
578 
579 static int
580 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
581 				     struct mlxsw_sp_bridge_port *bridge_port,
582 				     enum mlxsw_sp_flood_type packet_type,
583 				     bool member)
584 {
585 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
586 	int err;
587 
588 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
589 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
590 							  bridge_vlan,
591 							  packet_type,
592 							  member);
593 		if (err)
594 			goto err_port_bridge_vlan_flood_set;
595 	}
596 
597 	return 0;
598 
599 err_port_bridge_vlan_flood_set:
600 	list_for_each_entry_continue_reverse(bridge_vlan,
601 					     &bridge_port->vlans_list, list)
602 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
603 						    packet_type, !member);
604 	return err;
605 }
606 
607 static int
608 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
609 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
610 				       bool set)
611 {
612 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
613 	u16 vid = bridge_vlan->vid;
614 
615 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
616 			    bridge_vlan_node) {
617 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
618 			continue;
619 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
620 	}
621 
622 	return 0;
623 }
624 
625 static int
626 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
627 				  struct mlxsw_sp_bridge_port *bridge_port,
628 				  bool set)
629 {
630 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
631 	int err;
632 
633 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
634 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
635 							     bridge_vlan, set);
636 		if (err)
637 			goto err_port_bridge_vlan_learning_set;
638 	}
639 
640 	return 0;
641 
642 err_port_bridge_vlan_learning_set:
643 	list_for_each_entry_continue_reverse(bridge_vlan,
644 					     &bridge_port->vlans_list, list)
645 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
646 						       bridge_vlan, !set);
647 	return err;
648 }
649 
650 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
651 					   struct switchdev_trans *trans,
652 					   struct net_device *orig_dev,
653 					   unsigned long brport_flags)
654 {
655 	struct mlxsw_sp_bridge_port *bridge_port;
656 	int err;
657 
658 	if (switchdev_trans_ph_prepare(trans))
659 		return 0;
660 
661 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
662 						orig_dev);
663 	if (!bridge_port)
664 		return 0;
665 
666 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
667 						   MLXSW_SP_FLOOD_TYPE_UC,
668 						   brport_flags & BR_FLOOD);
669 	if (err)
670 		return err;
671 
672 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
673 						brport_flags & BR_LEARNING);
674 	if (err)
675 		return err;
676 
677 	if (bridge_port->bridge_device->multicast_enabled)
678 		goto out;
679 
680 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
681 						   MLXSW_SP_FLOOD_TYPE_MC,
682 						   brport_flags &
683 						   BR_MCAST_FLOOD);
684 	if (err)
685 		return err;
686 
687 out:
688 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
689 	return 0;
690 }
691 
692 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
693 {
694 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
695 	int err;
696 
697 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
698 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
699 	if (err)
700 		return err;
701 	mlxsw_sp->bridge->ageing_time = ageing_time;
702 	return 0;
703 }
704 
705 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
706 					    struct switchdev_trans *trans,
707 					    unsigned long ageing_clock_t)
708 {
709 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
710 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
711 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
712 
713 	if (switchdev_trans_ph_prepare(trans)) {
714 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
715 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
716 			return -ERANGE;
717 		else
718 			return 0;
719 	}
720 
721 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
722 }
723 
724 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
725 					  struct switchdev_trans *trans,
726 					  struct net_device *orig_dev,
727 					  bool vlan_enabled)
728 {
729 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
730 	struct mlxsw_sp_bridge_device *bridge_device;
731 
732 	if (!switchdev_trans_ph_prepare(trans))
733 		return 0;
734 
735 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
736 	if (WARN_ON(!bridge_device))
737 		return -EINVAL;
738 
739 	if (bridge_device->vlan_enabled == vlan_enabled)
740 		return 0;
741 
742 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
743 	return -EINVAL;
744 }
745 
746 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
747 					  struct switchdev_trans *trans,
748 					  struct net_device *orig_dev,
749 					  bool is_port_mrouter)
750 {
751 	struct mlxsw_sp_bridge_port *bridge_port;
752 	int err;
753 
754 	if (switchdev_trans_ph_prepare(trans))
755 		return 0;
756 
757 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
758 						orig_dev);
759 	if (!bridge_port)
760 		return 0;
761 
762 	if (!bridge_port->bridge_device->multicast_enabled)
763 		goto out;
764 
765 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
766 						   MLXSW_SP_FLOOD_TYPE_MC,
767 						   is_port_mrouter);
768 	if (err)
769 		return err;
770 
771 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
772 					 is_port_mrouter);
773 out:
774 	bridge_port->mrouter = is_port_mrouter;
775 	return 0;
776 }
777 
778 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
779 {
780 	const struct mlxsw_sp_bridge_device *bridge_device;
781 
782 	bridge_device = bridge_port->bridge_device;
783 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
784 					bridge_port->flags & BR_MCAST_FLOOD;
785 }
786 
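/* Toggle multicast snooping on the bridge. With snooping disabled,
 * unregistered multicast is flooded according to BR_MCAST_FLOOD; with
 * snooping enabled, flooding is restricted to mrouter ports and the
 * MDB entries are synced to the device.
 */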
787 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
788 					 struct switchdev_trans *trans,
789 					 struct net_device *orig_dev,
790 					 bool mc_disabled)
791 {
792 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
793 	struct mlxsw_sp_bridge_device *bridge_device;
794 	struct mlxsw_sp_bridge_port *bridge_port;
795 	int err;
796 
797 	if (switchdev_trans_ph_prepare(trans))
798 		return 0;
799 
800 	/* It's possible we failed to enslave the port, yet this
801 	 * operation is still executed because it was deferred.
802 	 */
803 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
804 	if (!bridge_device)
805 		return 0;
806 
807 	if (bridge_device->multicast_enabled != !mc_disabled) {
808 		bridge_device->multicast_enabled = !mc_disabled;
809 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
810 						   bridge_device);
811 	}
812 
813 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
814 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
815 		bool member = mlxsw_sp_mc_flood(bridge_port);
816 
817 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
818 							   bridge_port,
819 							   packet_type, member);
820 		if (err)
821 			return err;
822 	}
823 
824 	bridge_device->multicast_enabled = !mc_disabled;
825 
826 	return 0;
827 }
828 
829 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
830 					 u16 mid_idx, bool add)
831 {
832 	char *smid_pl;
833 	int err;
834 
835 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
836 	if (!smid_pl)
837 		return -ENOMEM;
838 
839 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
840 			    mlxsw_sp_router_port(mlxsw_sp), add);
841 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
842 	kfree(smid_pl);
843 	return err;
844 }
845 
846 static void
847 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
848 				   struct mlxsw_sp_bridge_device *bridge_device,
849 				   bool add)
850 {
851 	struct mlxsw_sp_mid *mid;
852 
853 	list_for_each_entry(mid, &bridge_device->mids_list, list)
854 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
855 }
856 
857 static int
858 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
859 				  struct switchdev_trans *trans,
860 				  struct net_device *orig_dev,
861 				  bool is_mrouter)
862 {
863 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
864 	struct mlxsw_sp_bridge_device *bridge_device;
865 
866 	if (switchdev_trans_ph_prepare(trans))
867 		return 0;
868 
869 	/* It's possible we failed to enslave the port, yet this
870 	 * operation is still executed because it was deferred.
871 	 */
872 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
873 	if (!bridge_device)
874 		return 0;
875 
876 	if (bridge_device->mrouter != is_mrouter)
877 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
878 						   is_mrouter);
879 	bridge_device->mrouter = is_mrouter;
880 	return 0;
881 }
882 
883 static int mlxsw_sp_port_attr_set(struct net_device *dev,
884 				  const struct switchdev_attr *attr,
885 				  struct switchdev_trans *trans)
886 {
887 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
888 	int err;
889 
890 	switch (attr->id) {
891 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
892 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
893 						       attr->orig_dev,
894 						       attr->u.stp_state);
895 		break;
896 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
897 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
898 						      attr->orig_dev,
899 						      attr->u.brport_flags);
900 		break;
901 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
902 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
903 						       attr->u.ageing_time);
904 		break;
905 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
906 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
907 						     attr->orig_dev,
908 						     attr->u.vlan_filtering);
909 		break;
910 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
911 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
912 						     attr->orig_dev,
913 						     attr->u.mrouter);
914 		break;
915 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
916 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
917 						    attr->orig_dev,
918 						    attr->u.mc_disabled);
919 		break;
920 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
921 		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
922 							attr->orig_dev,
923 							attr->u.mrouter);
924 		break;
925 	default:
926 		err = -EOPNOTSUPP;
927 		break;
928 	}
929 
930 	if (switchdev_trans_ph_commit(trans))
931 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
932 
933 	return err;
934 }
935 
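/* Bind a {port, VID} to the FID backing the bridge (VLAN): resolve the
 * FID through the bridge operations, set the port's membership in the
 * FID's unicast, multicast and broadcast flood tables and finally map
 * the {port, VID} pair to the FID in the device.
 */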
936 static int
937 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
938 			    struct mlxsw_sp_bridge_port *bridge_port)
939 {
940 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
941 	struct mlxsw_sp_bridge_device *bridge_device;
942 	u8 local_port = mlxsw_sp_port->local_port;
943 	u16 vid = mlxsw_sp_port_vlan->vid;
944 	struct mlxsw_sp_fid *fid;
945 	int err;
946 
947 	bridge_device = bridge_port->bridge_device;
948 	fid = bridge_device->ops->fid_get(bridge_device, vid);
949 	if (IS_ERR(fid))
950 		return PTR_ERR(fid);
951 
952 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
953 				     bridge_port->flags & BR_FLOOD);
954 	if (err)
955 		goto err_fid_uc_flood_set;
956 
957 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
958 				     mlxsw_sp_mc_flood(bridge_port));
959 	if (err)
960 		goto err_fid_mc_flood_set;
961 
962 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
963 				     true);
964 	if (err)
965 		goto err_fid_bc_flood_set;
966 
967 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
968 	if (err)
969 		goto err_fid_port_vid_map;
970 
971 	mlxsw_sp_port_vlan->fid = fid;
972 
973 	return 0;
974 
975 err_fid_port_vid_map:
976 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
977 err_fid_bc_flood_set:
978 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
979 err_fid_mc_flood_set:
980 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
981 err_fid_uc_flood_set:
982 	mlxsw_sp_fid_put(fid);
983 	return err;
984 }
985 
986 static void
987 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
988 {
989 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
990 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
991 	u8 local_port = mlxsw_sp_port->local_port;
992 	u16 vid = mlxsw_sp_port_vlan->vid;
993 
994 	mlxsw_sp_port_vlan->fid = NULL;
995 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
996 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
997 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
998 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
999 	mlxsw_sp_fid_put(fid);
1000 }
1001 
1002 static u16
1003 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1004 			     u16 vid, bool is_pvid)
1005 {
1006 	if (is_pvid)
1007 		return vid;
1008 	else if (mlxsw_sp_port->pvid == vid)
1009 		return 0;	/* Disallow untagged packets */
1010 	else
1011 		return mlxsw_sp_port->pvid;
1012 }
1013 
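/* Called when a {port, VID} becomes part of a bridge: join the FID,
 * apply the bridge port's learning and STP state to the VID and link
 * the port VLAN to the per-VID bridge VLAN entry. If the port VLAN is
 * already linked to a bridge port, only the VLAN flags were changed
 * and there is nothing more to do.
 */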
1014 static int
1015 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1016 			       struct mlxsw_sp_bridge_port *bridge_port)
1017 {
1018 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1019 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1020 	u16 vid = mlxsw_sp_port_vlan->vid;
1021 	int err;
1022 
1023 	/* No need to continue if only VLAN flags were changed */
1024 	if (mlxsw_sp_port_vlan->bridge_port) {
1025 		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1026 		return 0;
1027 	}
1028 
1029 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
1030 	if (err)
1031 		return err;
1032 
1033 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
1034 					     bridge_port->flags & BR_LEARNING);
1035 	if (err)
1036 		goto err_port_vid_learning_set;
1037 
1038 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1039 					bridge_port->stp_state);
1040 	if (err)
1041 		goto err_port_vid_stp_set;
1042 
1043 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1044 	if (!bridge_vlan) {
1045 		err = -ENOMEM;
1046 		goto err_bridge_vlan_get;
1047 	}
1048 
1049 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1050 		 &bridge_vlan->port_vlan_list);
1051 
1052 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1053 				 bridge_port->dev);
1054 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
1055 
1056 	return 0;
1057 
1058 err_bridge_vlan_get:
1059 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1060 err_port_vid_stp_set:
1061 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1062 err_port_vid_learning_set:
1063 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1064 	return err;
1065 }
1066 
1067 void
1068 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1069 {
1070 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1071 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1072 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1073 	struct mlxsw_sp_bridge_port *bridge_port;
1074 	u16 vid = mlxsw_sp_port_vlan->vid;
1075 	bool last_port, last_vlan;
1076 
1077 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1078 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1079 		return;
1080 
1081 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1082 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1083 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1084 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1085 
1086 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1087 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1088 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1089 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1090 	if (last_port)
1091 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1092 					       bridge_port,
1093 					       mlxsw_sp_fid_index(fid));
1094 	if (last_vlan)
1095 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1096 
1097 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1098 
1099 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1100 	mlxsw_sp_port_vlan->bridge_port = NULL;
1101 }
1102 
1103 static int
1104 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1105 			      struct mlxsw_sp_bridge_port *bridge_port,
1106 			      u16 vid, bool is_untagged, bool is_pvid)
1107 {
1108 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1109 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1110 	u16 old_pvid = mlxsw_sp_port->pvid;
1111 	int err;
1112 
1113 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
1114 	if (IS_ERR(mlxsw_sp_port_vlan))
1115 		return PTR_ERR(mlxsw_sp_port_vlan);
1116 
1117 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1118 				     is_untagged);
1119 	if (err)
1120 		goto err_port_vlan_set;
1121 
1122 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1123 	if (err)
1124 		goto err_port_pvid_set;
1125 
1126 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1127 	if (err)
1128 		goto err_port_vlan_bridge_join;
1129 
1130 	return 0;
1131 
1132 err_port_vlan_bridge_join:
1133 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1134 err_port_pvid_set:
1135 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1136 err_port_vlan_set:
1137 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1138 	return err;
1139 }
1140 
1141 static int
1142 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1143 				const struct net_device *br_dev,
1144 				const struct switchdev_obj_port_vlan *vlan)
1145 {
1146 	struct mlxsw_sp_rif *rif;
1147 	struct mlxsw_sp_fid *fid;
1148 	u16 pvid;
1149 	u16 vid;
1150 
1151 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1152 	if (!rif)
1153 		return 0;
1154 	fid = mlxsw_sp_rif_fid(rif);
1155 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1156 
1157 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1158 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1159 			if (vid != pvid) {
1160 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1161 				return -EBUSY;
1162 			}
1163 		} else {
1164 			if (vid == pvid) {
1165 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1166 				return -EBUSY;
1167 			}
1168 		}
1169 	}
1170 
1171 	return 0;
1172 }
1173 
1174 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1175 				   const struct switchdev_obj_port_vlan *vlan,
1176 				   struct switchdev_trans *trans)
1177 {
1178 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1179 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1180 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1181 	struct net_device *orig_dev = vlan->obj.orig_dev;
1182 	struct mlxsw_sp_bridge_port *bridge_port;
1183 	u16 vid;
1184 
1185 	if (netif_is_bridge_master(orig_dev)) {
1186 		int err = 0;
1187 
1188 		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1189 		    br_vlan_enabled(orig_dev) &&
1190 		    switchdev_trans_ph_prepare(trans))
1191 			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1192 							      orig_dev, vlan);
1193 		if (!err)
1194 			err = -EOPNOTSUPP;
1195 		return err;
1196 	}
1197 
1198 	if (switchdev_trans_ph_prepare(trans))
1199 		return 0;
1200 
1201 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1202 	if (WARN_ON(!bridge_port))
1203 		return -EINVAL;
1204 
1205 	if (!bridge_port->bridge_device->vlan_enabled)
1206 		return 0;
1207 
1208 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1209 		int err;
1210 
1211 		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1212 						    vid, flag_untagged,
1213 						    flag_pvid);
1214 		if (err)
1215 			return err;
1216 	}
1217 
1218 	return 0;
1219 }
1220 
1221 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1222 {
1223 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1224 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1225 }
1226 
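/* Flush all dynamic FDB records learned on this bridge port in the
 * given FID. The SFDF register flushes either per {system port, FID}
 * or per {LAG, FID}, depending on whether the bridge port is a LAG.
 */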
1227 static int
1228 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1229 			       struct mlxsw_sp_bridge_port *bridge_port,
1230 			       u16 fid_index)
1231 {
1232 	bool lagged = bridge_port->lagged;
1233 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1234 	u16 system_port;
1235 
1236 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1237 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1238 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1239 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1240 
1241 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1242 }
1243 
1244 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1245 {
1246 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1247 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1248 }
1249 
1250 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1251 {
1252 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1253 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1254 }
1255 
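/* Write a unicast tunnel (VXLAN) FDB record using the SFD register.
 * Only IPv4 underlays are currently supported. As with the other SFD
 * helpers below, the number of records processed by the device is read
 * back after the write; if it differs from the number requested, the
 * record was not committed and -EBUSY is returned.
 */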
1256 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1257 					  const char *mac, u16 fid,
1258 					  enum mlxsw_sp_l3proto proto,
1259 					  const union mlxsw_sp_l3addr *addr,
1260 					  bool adding, bool dynamic)
1261 {
1262 	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
1263 	char *sfd_pl;
1264 	u8 num_rec;
1265 	u32 uip;
1266 	int err;
1267 
1268 	switch (proto) {
1269 	case MLXSW_SP_L3_PROTO_IPV4:
1270 		uip = be32_to_cpu(addr->addr4);
1271 		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
1272 		break;
1273 	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
1274 	default:
1275 		WARN_ON(1);
1276 		return -EOPNOTSUPP;
1277 	}
1278 
1279 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1280 	if (!sfd_pl)
1281 		return -ENOMEM;
1282 
1283 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1284 	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
1285 				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
1286 				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
1287 				     sfd_proto);
1288 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1289 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1290 	if (err)
1291 		goto out;
1292 
1293 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1294 		err = -EBUSY;
1295 
1296 out:
1297 	kfree(sfd_pl);
1298 	return err;
1299 }
1300 
1301 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1302 				     const char *mac, u16 fid, bool adding,
1303 				     enum mlxsw_reg_sfd_rec_action action,
1304 				     bool dynamic)
1305 {
1306 	char *sfd_pl;
1307 	u8 num_rec;
1308 	int err;
1309 
1310 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1311 	if (!sfd_pl)
1312 		return -ENOMEM;
1313 
1314 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1315 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1316 			      mac, fid, action, local_port);
1317 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1318 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1319 	if (err)
1320 		goto out;
1321 
1322 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1323 		err = -EBUSY;
1324 
1325 out:
1326 	kfree(sfd_pl);
1327 	return err;
1328 }
1329 
1330 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1331 				   const char *mac, u16 fid, bool adding,
1332 				   bool dynamic)
1333 {
1334 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1335 					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1336 }
1337 
1338 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1339 			bool adding)
1340 {
1341 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1342 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1343 					 false);
1344 }
1345 
1346 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1347 				       const char *mac, u16 fid, u16 lag_vid,
1348 				       bool adding, bool dynamic)
1349 {
1350 	char *sfd_pl;
1351 	u8 num_rec;
1352 	int err;
1353 
1354 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1355 	if (!sfd_pl)
1356 		return -ENOMEM;
1357 
1358 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1359 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1360 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1361 				  lag_vid, lag_id);
1362 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1363 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1364 	if (err)
1365 		goto out;
1366 
1367 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1368 		err = -EBUSY;
1369 
1370 out:
1371 	kfree(sfd_pl);
1372 	return err;
1373 }
1374 
1375 static int
1376 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1377 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1378 {
1379 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1380 	struct net_device *orig_dev = fdb_info->info.dev;
1381 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1382 	struct mlxsw_sp_bridge_device *bridge_device;
1383 	struct mlxsw_sp_bridge_port *bridge_port;
1384 	u16 fid_index, vid;
1385 
1386 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1387 	if (!bridge_port)
1388 		return -EINVAL;
1389 
1390 	bridge_device = bridge_port->bridge_device;
1391 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1392 							       bridge_device,
1393 							       fdb_info->vid);
1394 	if (!mlxsw_sp_port_vlan)
1395 		return 0;
1396 
1397 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1398 	vid = mlxsw_sp_port_vlan->vid;
1399 
1400 	if (!bridge_port->lagged)
1401 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1402 					       bridge_port->system_port,
1403 					       fdb_info->addr, fid_index,
1404 					       adding, false);
1405 	else
1406 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1407 						   bridge_port->lag_id,
1408 						   fdb_info->addr, fid_index,
1409 						   vid, adding, false);
1410 }
1411 
1412 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1413 				u16 fid, u16 mid_idx, bool adding)
1414 {
1415 	char *sfd_pl;
1416 	u8 num_rec;
1417 	int err;
1418 
1419 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1420 	if (!sfd_pl)
1421 		return -ENOMEM;
1422 
1423 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1424 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1425 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1426 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1427 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1428 	if (err)
1429 		goto out;
1430 
1431 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1432 		err = -EBUSY;
1433 
1434 out:
1435 	kfree(sfd_pl);
1436 	return err;
1437 }
1438 
1439 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1440 					 long *ports_bitmap,
1441 					 bool set_router_port)
1442 {
1443 	char *smid_pl;
1444 	int err, i;
1445 
1446 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1447 	if (!smid_pl)
1448 		return -ENOMEM;
1449 
1450 	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1451 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1452 		if (mlxsw_sp->ports[i])
1453 			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1454 	}
1455 
1456 	mlxsw_reg_smid_port_mask_set(smid_pl,
1457 				     mlxsw_sp_router_port(mlxsw_sp), 1);
1458 
1459 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1460 		mlxsw_reg_smid_port_set(smid_pl, i, 1);
1461 
1462 	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1463 				set_router_port);
1464 
1465 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1466 	kfree(smid_pl);
1467 	return err;
1468 }
1469 
1470 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1471 				  u16 mid_idx, bool add)
1472 {
1473 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1474 	char *smid_pl;
1475 	int err;
1476 
1477 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1478 	if (!smid_pl)
1479 		return -ENOMEM;
1480 
1481 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1482 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1483 	kfree(smid_pl);
1484 	return err;
1485 }
1486 
1487 static struct
1488 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1489 				const unsigned char *addr,
1490 				u16 fid)
1491 {
1492 	struct mlxsw_sp_mid *mid;
1493 
1494 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1495 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1496 			return mid;
1497 	}
1498 	return NULL;
1499 }
1500 
1501 static void
1502 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1503 				      struct mlxsw_sp_bridge_port *bridge_port,
1504 				      unsigned long *ports_bitmap)
1505 {
1506 	struct mlxsw_sp_port *mlxsw_sp_port;
1507 	u64 max_lag_members, i;
1508 	int lag_id;
1509 
1510 	if (!bridge_port->lagged) {
1511 		set_bit(bridge_port->system_port, ports_bitmap);
1512 	} else {
1513 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1514 						     MAX_LAG_MEMBERS);
1515 		lag_id = bridge_port->lag_id;
1516 		for (i = 0; i < max_lag_members; i++) {
1517 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1518 								 lag_id, i);
1519 			if (mlxsw_sp_port)
1520 				set_bit(mlxsw_sp_port->local_port,
1521 					ports_bitmap);
1522 		}
1523 	}
1524 }
1525 
1526 static void
1527 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1528 				struct mlxsw_sp_bridge_device *bridge_device,
1529 				struct mlxsw_sp *mlxsw_sp)
1530 {
1531 	struct mlxsw_sp_bridge_port *bridge_port;
1532 
1533 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1534 		if (bridge_port->mrouter) {
1535 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1536 							      bridge_port,
1537 							      flood_bitmap);
1538 		}
1539 	}
1540 }
1541 
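/* Program an MDB entry into the device: allocate a free MID index,
 * build the flood bitmap from the MDB member ports plus all mrouter
 * ports, write it through SMID and then point the {MAC, FID} pair at
 * the MID with a multicast SFD record. Returns false if no free MID
 * index is available or if the device rejects the configuration.
 */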
1542 static bool
1543 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1544 			    struct mlxsw_sp_mid *mid,
1545 			    struct mlxsw_sp_bridge_device *bridge_device)
1546 {
1547 	long *flood_bitmap;
1548 	int num_of_ports;
1549 	int alloc_size;
1550 	u16 mid_idx;
1551 	int err;
1552 
1553 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1554 				      MLXSW_SP_MID_MAX);
1555 	if (mid_idx == MLXSW_SP_MID_MAX)
1556 		return false;
1557 
1558 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1559 	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1560 	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1561 	if (!flood_bitmap)
1562 		return false;
1563 
1564 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1565 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1566 
1567 	mid->mid = mid_idx;
1568 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1569 					    bridge_device->mrouter);
1570 	kfree(flood_bitmap);
1571 	if (err)
1572 		return false;
1573 
1574 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1575 				   true);
1576 	if (err)
1577 		return false;
1578 
1579 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1580 	mid->in_hw = true;
1581 	return true;
1582 }
1583 
1584 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1585 					struct mlxsw_sp_mid *mid)
1586 {
1587 	if (!mid->in_hw)
1588 		return 0;
1589 
1590 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1591 	mid->in_hw = false;
1592 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1593 				    false);
1594 }
1595 
1596 static struct
1597 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1598 				  struct mlxsw_sp_bridge_device *bridge_device,
1599 				  const unsigned char *addr,
1600 				  u16 fid)
1601 {
1602 	struct mlxsw_sp_mid *mid;
1603 	size_t alloc_size;
1604 
1605 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1606 	if (!mid)
1607 		return NULL;
1608 
1609 	alloc_size = sizeof(unsigned long) *
1610 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1611 
1612 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1613 	if (!mid->ports_in_mid)
1614 		goto err_ports_in_mid_alloc;
1615 
1616 	ether_addr_copy(mid->addr, addr);
1617 	mid->fid = fid;
1618 	mid->in_hw = false;
1619 
1620 	if (!bridge_device->multicast_enabled)
1621 		goto out;
1622 
1623 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1624 		goto err_write_mdb_entry;
1625 
1626 out:
1627 	list_add_tail(&mid->list, &bridge_device->mids_list);
1628 	return mid;
1629 
1630 err_write_mdb_entry:
1631 	kfree(mid->ports_in_mid);
1632 err_ports_in_mid_alloc:
1633 	kfree(mid);
1634 	return NULL;
1635 }
1636 
1637 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1638 					 struct mlxsw_sp_mid *mid)
1639 {
1640 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1641 	int err = 0;
1642 
1643 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1644 	if (bitmap_empty(mid->ports_in_mid,
1645 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1646 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1647 		list_del(&mid->list);
1648 		kfree(mid->ports_in_mid);
1649 		kfree(mid);
1650 	}
1651 	return err;
1652 }
1653 
1654 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1655 				 const struct switchdev_obj_port_mdb *mdb,
1656 				 struct switchdev_trans *trans)
1657 {
1658 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1659 	struct net_device *orig_dev = mdb->obj.orig_dev;
1660 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1661 	struct net_device *dev = mlxsw_sp_port->dev;
1662 	struct mlxsw_sp_bridge_device *bridge_device;
1663 	struct mlxsw_sp_bridge_port *bridge_port;
1664 	struct mlxsw_sp_mid *mid;
1665 	u16 fid_index;
1666 	int err = 0;
1667 
1668 	if (switchdev_trans_ph_prepare(trans))
1669 		return 0;
1670 
1671 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1672 	if (!bridge_port)
1673 		return 0;
1674 
1675 	bridge_device = bridge_port->bridge_device;
1676 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1677 							       bridge_device,
1678 							       mdb->vid);
1679 	if (!mlxsw_sp_port_vlan)
1680 		return 0;
1681 
1682 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1683 
1684 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1685 	if (!mid) {
1686 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1687 					  fid_index);
1688 		if (!mid) {
1689 			netdev_err(dev, "Unable to allocate MC group\n");
1690 			return -ENOMEM;
1691 		}
1692 	}
1693 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1694 
1695 	if (!bridge_device->multicast_enabled)
1696 		return 0;
1697 
1698 	if (bridge_port->mrouter)
1699 		return 0;
1700 
1701 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1702 	if (err) {
1703 		netdev_err(dev, "Unable to set SMID\n");
1704 		goto err_out;
1705 	}
1706 
1707 	return 0;
1708 
1709 err_out:
1710 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1711 	return err;
1712 }
1713 
1714 static void
1715 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1716 				   struct mlxsw_sp_bridge_device
1717 				   *bridge_device)
1718 {
1719 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1720 	struct mlxsw_sp_mid *mid;
1721 	bool mc_enabled;
1722 
1723 	mc_enabled = bridge_device->multicast_enabled;
1724 
1725 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1726 		if (mc_enabled)
1727 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1728 						    bridge_device);
1729 		else
1730 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1731 	}
1732 }
1733 
1734 static void
1735 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1736 				 struct mlxsw_sp_bridge_port *bridge_port,
1737 				 bool add)
1738 {
1739 	struct mlxsw_sp_bridge_device *bridge_device;
1740 	struct mlxsw_sp_mid *mid;
1741 
1742 	bridge_device = bridge_port->bridge_device;
1743 
1744 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1745 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1746 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1747 	}
1748 }
1749 
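/* SPAN (mirroring) entries may need to be re-resolved after bridge
 * configuration changes. The respin is deferred to a work item so that
 * it runs under RTNL and only after the bridge has actually applied
 * the change that triggered it.
 */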
1750 struct mlxsw_sp_span_respin_work {
1751 	struct work_struct work;
1752 	struct mlxsw_sp *mlxsw_sp;
1753 };
1754 
1755 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1756 {
1757 	struct mlxsw_sp_span_respin_work *respin_work =
1758 		container_of(work, struct mlxsw_sp_span_respin_work, work);
1759 
1760 	rtnl_lock();
1761 	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1762 	rtnl_unlock();
1763 	kfree(respin_work);
1764 }
1765 
1766 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1767 {
1768 	struct mlxsw_sp_span_respin_work *respin_work;
1769 
1770 	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1771 	if (!respin_work)
1772 		return;
1773 
1774 	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1775 	respin_work->mlxsw_sp = mlxsw_sp;
1776 
1777 	mlxsw_core_schedule_work(&respin_work->work);
1778 }
1779 
1780 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1781 				 const struct switchdev_obj *obj,
1782 				 struct switchdev_trans *trans)
1783 {
1784 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1785 	const struct switchdev_obj_port_vlan *vlan;
1786 	int err = 0;
1787 
1788 	switch (obj->id) {
1789 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1790 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1791 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
1792 
1793 		if (switchdev_trans_ph_prepare(trans)) {
1794 			/* The event is emitted before the changes are actually
1795 			 * applied to the bridge. Therefore schedule the respin
1796 			 * call for later, so that the respin logic sees the
1797 			 * updated bridge state.
1798 			 */
1799 			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1800 		}
1801 		break;
1802 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1803 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1804 					    SWITCHDEV_OBJ_PORT_MDB(obj),
1805 					    trans);
1806 		break;
1807 	default:
1808 		err = -EOPNOTSUPP;
1809 		break;
1810 	}
1811 
1812 	return err;
1813 }
1814 
1815 static void
1816 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1817 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1818 {
1819 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
1820 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1821 
1822 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1823 	if (WARN_ON(!mlxsw_sp_port_vlan))
1824 		return;
1825 
1826 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1827 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1828 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1829 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1830 }
1831 
1832 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1833 				   const struct switchdev_obj_port_vlan *vlan)
1834 {
1835 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1836 	struct net_device *orig_dev = vlan->obj.orig_dev;
1837 	struct mlxsw_sp_bridge_port *bridge_port;
1838 	u16 vid;
1839 
1840 	if (netif_is_bridge_master(orig_dev))
1841 		return -EOPNOTSUPP;
1842 
1843 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1844 	if (WARN_ON(!bridge_port))
1845 		return -EINVAL;
1846 
1847 	if (!bridge_port->bridge_device->vlan_enabled)
1848 		return 0;
1849 
1850 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1851 		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
1852 
1853 	return 0;
1854 }
1855 
1856 static int
1857 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1858 			struct mlxsw_sp_bridge_port *bridge_port,
1859 			struct mlxsw_sp_mid *mid)
1860 {
1861 	struct net_device *dev = mlxsw_sp_port->dev;
1862 	int err;
1863 
1864 	if (bridge_port->bridge_device->multicast_enabled &&
1865 	    !bridge_port->mrouter) {
1866 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1867 		if (err)
1868 			netdev_err(dev, "Unable to remove port from SMID\n");
1869 	}
1870 
1871 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1872 	if (err)
1873 		netdev_err(dev, "Unable to remove MC SFD\n");
1874 
1875 	return err;
1876 }
1877 
1878 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1879 				 const struct switchdev_obj_port_mdb *mdb)
1880 {
1881 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1882 	struct net_device *orig_dev = mdb->obj.orig_dev;
1883 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1884 	struct mlxsw_sp_bridge_device *bridge_device;
1885 	struct net_device *dev = mlxsw_sp_port->dev;
1886 	struct mlxsw_sp_bridge_port *bridge_port;
1887 	struct mlxsw_sp_mid *mid;
1888 	u16 fid_index;
1889 
1890 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1891 	if (!bridge_port)
1892 		return 0;
1893 
1894 	bridge_device = bridge_port->bridge_device;
1895 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1896 							       bridge_device,
1897 							       mdb->vid);
1898 	if (!mlxsw_sp_port_vlan)
1899 		return 0;
1900 
1901 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1902 
1903 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1904 	if (!mid) {
1905 		netdev_err(dev, "Unable to remove port from MC DB\n");
1906 		return -EINVAL;
1907 	}
1908 
1909 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1910 }
1911 
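/* Remove the port from every MDB entry of its bridge. A multicast router
 * port is also removed from the SMID of groups it is not a member of, to
 * which it was added by mlxsw_sp_port_mrouter_update_mdb().
 */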
1912 static void
1913 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1914 			       struct mlxsw_sp_bridge_port *bridge_port)
1915 {
1916 	struct mlxsw_sp_bridge_device *bridge_device;
1917 	struct mlxsw_sp_mid *mid, *tmp;
1918 
1919 	bridge_device = bridge_port->bridge_device;
1920 
1921 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1922 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1923 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1924 						mid);
1925 		} else if (bridge_device->multicast_enabled &&
1926 			   bridge_port->mrouter) {
1927 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1928 		}
1929 	}
1930 }
1931 
1932 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1933 				 const struct switchdev_obj *obj)
1934 {
1935 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1936 	int err = 0;
1937 
1938 	switch (obj->id) {
1939 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1940 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1941 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1942 		break;
1943 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1944 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1945 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1946 		break;
1947 	default:
1948 		err = -EOPNOTSUPP;
1949 		break;
1950 	}
1951 
1952 	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1953 
1954 	return err;
1955 }
1956 
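/* Find any port that is currently a member of the given LAG, to act as the
 * LAG's representative when processing FDB notifications for it.
 */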
1957 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1958 						   u16 lag_id)
1959 {
1960 	struct mlxsw_sp_port *mlxsw_sp_port;
1961 	u64 max_lag_members;
1962 	int i;
1963 
1964 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1965 					     MAX_LAG_MEMBERS);
1966 	for (i = 0; i < max_lag_members; i++) {
1967 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1968 		if (mlxsw_sp_port)
1969 			return mlxsw_sp_port;
1970 	}
1971 	return NULL;
1972 }
1973 
1974 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1975 	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
1976 	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
1977 	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
1978 	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
1979 };
1980 
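/* 802.1Q ops: VLAN-aware bridge. Each VLAN is mapped to a dedicated 802.1Q
 * FID (see mlxsw_sp_bridge_8021q_fid_get()), so joining only releases the
 * default VID 1 {port, VLAN} and lets the bridge manage the port's VLANs.
 */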
1981 static int
1982 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1983 				struct mlxsw_sp_bridge_port *bridge_port,
1984 				struct mlxsw_sp_port *mlxsw_sp_port,
1985 				struct netlink_ext_ack *extack)
1986 {
1987 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1988 
1989 	if (is_vlan_dev(bridge_port->dev)) {
1990 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1991 		return -EINVAL;
1992 	}
1993 
1994 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
1995 	if (WARN_ON(!mlxsw_sp_port_vlan))
1996 		return -EINVAL;
1997 
1998 	/* Let VLAN-aware bridge take care of its own VLANs */
1999 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2000 
2001 	return 0;
2002 }
2003 
2004 static void
2005 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2006 				 struct mlxsw_sp_bridge_port *bridge_port,
2007 				 struct mlxsw_sp_port *mlxsw_sp_port)
2008 {
2009 	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2010 	/* Make sure untagged frames are allowed to ingress */
2011 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2012 }
2013 
2014 static int
2015 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2016 				 const struct net_device *vxlan_dev,
2017 				 struct netlink_ext_ack *extack)
2018 {
2019 	WARN_ON(1);
2020 	return -EINVAL;
2021 }
2022 
2023 static void
2024 mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
2025 				  const struct net_device *vxlan_dev)
2026 {
2027 }
2028 
2029 static struct mlxsw_sp_fid *
2030 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2031 			      u16 vid)
2032 {
2033 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2034 
2035 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2036 }
2037 
2038 static struct mlxsw_sp_fid *
2039 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2040 				 u16 vid)
2041 {
2042 	WARN_ON(1);
2043 	return NULL;
2044 }
2045 
2046 static u16
2047 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2048 			      const struct mlxsw_sp_fid *fid)
2049 {
2050 	return mlxsw_sp_fid_8021q_vid(fid);
2051 }
2052 
2053 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2054 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
2055 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
2056 	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
2057 	.vxlan_leave	= mlxsw_sp_bridge_8021q_vxlan_leave,
2058 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2059 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2060 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2061 };
2062 
2063 static bool
2064 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2065 			   const struct net_device *br_dev)
2066 {
2067 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2068 
2069 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2070 			    list) {
2071 		if (mlxsw_sp_port_vlan->bridge_port &&
2072 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2073 		    br_dev)
2074 			return true;
2075 	}
2076 
2077 	return false;
2078 }
2079 
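/* 802.1D ops: VLAN-unaware bridge. The member is either a port (VID 1) or a
 * VLAN upper of a port, and the whole bridge maps to a single 802.1D FID
 * keyed by the bridge's ifindex. VLAN uppers of the same port cannot be
 * enslaved to the same bridge.
 */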
2080 static int
2081 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2082 				struct mlxsw_sp_bridge_port *bridge_port,
2083 				struct mlxsw_sp_port *mlxsw_sp_port,
2084 				struct netlink_ext_ack *extack)
2085 {
2086 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2087 	struct net_device *dev = bridge_port->dev;
2088 	u16 vid;
2089 
2090 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2091 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2092 	if (WARN_ON(!mlxsw_sp_port_vlan))
2093 		return -EINVAL;
2094 
2095 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2096 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2097 		return -EINVAL;
2098 	}
2099 
2100 	/* Port is no longer usable as a router interface */
2101 	if (mlxsw_sp_port_vlan->fid)
2102 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2103 
2104 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
2105 }
2106 
2107 static void
2108 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2109 				 struct mlxsw_sp_bridge_port *bridge_port,
2110 				 struct mlxsw_sp_port *mlxsw_sp_port)
2111 {
2112 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2113 	struct net_device *dev = bridge_port->dev;
2114 	u16 vid;
2115 
2116 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2117 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2118 	if (!mlxsw_sp_port_vlan)
2119 		return;
2120 
2121 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2122 }
2123 
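/* Enslaving a VxLAN device to a VLAN-unaware bridge enables NVE on the
 * bridge's FID using the device's VNI. This fails if the FID already has a
 * VNI, since a FID can only be bound to a single VNI.
 */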
2124 static int
2125 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2126 				 const struct net_device *vxlan_dev,
2127 				 struct netlink_ext_ack *extack)
2128 {
2129 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2130 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2131 	struct mlxsw_sp_nve_params params = {
2132 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2133 		.vni = vxlan->cfg.vni,
2134 		.dev = vxlan_dev,
2135 	};
2136 	struct mlxsw_sp_fid *fid;
2137 	int err;
2138 
2139 	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2140 	if (!fid)
2141 		return -EINVAL;
2142 
2143 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2144 		err = -EINVAL;
2145 		goto err_vni_exists;
2146 	}
2147 
2148 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2149 	if (err)
2150 		goto err_nve_fid_enable;
2151 
2152 	/* The tunnel port does not hold a reference on the FID. Only
2153 	 * local ports and the router port do.
2154 	 */
2155 	mlxsw_sp_fid_put(fid);
2156 
2157 	return 0;
2158 
2159 err_nve_fid_enable:
2160 err_vni_exists:
2161 	mlxsw_sp_fid_put(fid);
2162 	return err;
2163 }
2164 
2165 static void
2166 mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
2167 				  const struct net_device *vxlan_dev)
2168 {
2169 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2170 	struct mlxsw_sp_fid *fid;
2171 
2172 	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2173 	if (WARN_ON(!fid))
2174 		return;
2175 
2176 	/* If the VxLAN device is down, then the FID does not have a VNI */
2177 	if (!mlxsw_sp_fid_vni_is_set(fid))
2178 		goto out;
2179 
2180 	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2181 out:
2182 	mlxsw_sp_fid_put(fid);
2183 }
2184 
2185 static struct mlxsw_sp_fid *
2186 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2187 			      u16 vid)
2188 {
2189 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2190 	struct net_device *vxlan_dev;
2191 	struct mlxsw_sp_fid *fid;
2192 	int err;
2193 
2194 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2195 	if (IS_ERR(fid))
2196 		return fid;
2197 
2198 	if (mlxsw_sp_fid_vni_is_set(fid))
2199 		return fid;
2200 
2201 	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
2202 	if (!vxlan_dev)
2203 		return fid;
2204 
2205 	if (!netif_running(vxlan_dev))
2206 		return fid;
2207 
2208 	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
2209 	if (err)
2210 		goto err_vxlan_join;
2211 
2212 	return fid;
2213 
2214 err_vxlan_join:
2215 	mlxsw_sp_fid_put(fid);
2216 	return ERR_PTR(err);
2217 }
2218 
2219 static struct mlxsw_sp_fid *
2220 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2221 				 u16 vid)
2222 {
2223 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2224 
2225 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2226 	if (vid)
2227 		return NULL;
2228 
2229 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2230 }
2231 
2232 static u16
2233 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2234 			      const struct mlxsw_sp_fid *fid)
2235 {
2236 	return 0;
2237 }
2238 
2239 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2240 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2241 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2242 	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
2243 	.vxlan_leave	= mlxsw_sp_bridge_8021d_vxlan_leave,
2244 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2245 	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
2246 	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
2247 };
2248 
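/* Entry points used by the rest of the driver when a port (or the LAG it
 * belongs to) is enslaved to or released from a bridge. The bridge port is
 * looked up (or created) and the per-bridge-type ops above do the rest.
 */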
2249 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2250 			      struct net_device *brport_dev,
2251 			      struct net_device *br_dev,
2252 			      struct netlink_ext_ack *extack)
2253 {
2254 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2255 	struct mlxsw_sp_bridge_device *bridge_device;
2256 	struct mlxsw_sp_bridge_port *bridge_port;
2257 	int err;
2258 
2259 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2260 	if (IS_ERR(bridge_port))
2261 		return PTR_ERR(bridge_port);
2262 	bridge_device = bridge_port->bridge_device;
2263 
2264 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2265 					    mlxsw_sp_port, extack);
2266 	if (err)
2267 		goto err_port_join;
2268 
2269 	return 0;
2270 
2271 err_port_join:
2272 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2273 	return err;
2274 }
2275 
2276 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2277 				struct net_device *brport_dev,
2278 				struct net_device *br_dev)
2279 {
2280 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2281 	struct mlxsw_sp_bridge_device *bridge_device;
2282 	struct mlxsw_sp_bridge_port *bridge_port;
2283 
2284 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2285 	if (!bridge_device)
2286 		return;
2287 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2288 	if (!bridge_port)
2289 		return;
2290 
2291 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2292 				       mlxsw_sp_port);
2293 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2294 }
2295 
2296 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2297 			       const struct net_device *br_dev,
2298 			       const struct net_device *vxlan_dev,
2299 			       struct netlink_ext_ack *extack)
2300 {
2301 	struct mlxsw_sp_bridge_device *bridge_device;
2302 
2303 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2304 	if (WARN_ON(!bridge_device))
2305 		return -EINVAL;
2306 
2307 	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
2308 }
2309 
2310 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2311 				 const struct net_device *br_dev,
2312 				 const struct net_device *vxlan_dev)
2313 {
2314 	struct mlxsw_sp_bridge_device *bridge_device;
2315 
2316 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2317 	if (WARN_ON(!bridge_device))
2318 		return;
2319 
2320 	bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
2321 }
2322 
2323 static void
2324 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2325 			    const char *mac, u16 vid,
2326 			    struct net_device *dev, bool offloaded)
2327 {
2328 	struct switchdev_notifier_fdb_info info;
2329 
2330 	info.addr = mac;
2331 	info.vid = vid;
2332 	info.offloaded = offloaded;
2333 	call_switchdev_notifiers(type, dev, &info.info);
2334 }
2335 
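/* Process a learned / aged-out MAC record from an SFN query: program (or
 * remove) the corresponding FDB entry in hardware and notify the bridge via
 * SWITCHDEV_FDB_{ADD,DEL}_TO_BRIDGE. If the {port, FID} can no longer be
 * resolved, the entry is only removed from hardware and no notification is
 * sent.
 */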
2336 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2337 					    char *sfn_pl, int rec_index,
2338 					    bool adding)
2339 {
2340 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2341 	struct mlxsw_sp_bridge_device *bridge_device;
2342 	struct mlxsw_sp_bridge_port *bridge_port;
2343 	struct mlxsw_sp_port *mlxsw_sp_port;
2344 	enum switchdev_notifier_type type;
2345 	char mac[ETH_ALEN];
2346 	u8 local_port;
2347 	u16 vid, fid;
2348 	bool do_notification = true;
2349 	int err;
2350 
2351 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2352 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2353 	if (!mlxsw_sp_port) {
2354 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2355 		goto just_remove;
2356 	}
2357 
2358 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2359 	if (!mlxsw_sp_port_vlan) {
2360 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2361 		goto just_remove;
2362 	}
2363 
2364 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2365 	if (!bridge_port) {
2366 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2367 		goto just_remove;
2368 	}
2369 
2370 	bridge_device = bridge_port->bridge_device;
2371 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2372 
2373 do_fdb_op:
2374 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2375 				      adding, true);
2376 	if (err) {
2377 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2378 		return;
2379 	}
2380 
2381 	if (!do_notification)
2382 		return;
2383 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2384 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2385 
2386 	return;
2387 
2388 just_remove:
2389 	adding = false;
2390 	do_notification = false;
2391 	goto do_fdb_op;
2392 }
2393 
2394 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2395 						char *sfn_pl, int rec_index,
2396 						bool adding)
2397 {
2398 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2399 	struct mlxsw_sp_bridge_device *bridge_device;
2400 	struct mlxsw_sp_bridge_port *bridge_port;
2401 	struct mlxsw_sp_port *mlxsw_sp_port;
2402 	enum switchdev_notifier_type type;
2403 	char mac[ETH_ALEN];
2404 	u16 lag_vid = 0;
2405 	u16 lag_id;
2406 	u16 vid, fid;
2407 	bool do_notification = true;
2408 	int err;
2409 
2410 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2411 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2412 	if (!mlxsw_sp_port) {
2413 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2414 		goto just_remove;
2415 	}
2416 
2417 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2418 	if (!mlxsw_sp_port_vlan) {
2419 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2420 		goto just_remove;
2421 	}
2422 
2423 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2424 	if (!bridge_port) {
2425 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2426 		goto just_remove;
2427 	}
2428 
2429 	bridge_device = bridge_port->bridge_device;
2430 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2431 	lag_vid = mlxsw_sp_port_vlan->vid;
2432 
2433 do_fdb_op:
2434 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2435 					  adding, true);
2436 	if (err) {
2437 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2438 		return;
2439 	}
2440 
2441 	if (!do_notification)
2442 		return;
2443 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2444 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2445 
2446 	return;
2447 
2448 just_remove:
2449 	adding = false;
2450 	do_notification = false;
2451 	goto do_fdb_op;
2452 }
2453 
2454 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2455 					    char *sfn_pl, int rec_index)
2456 {
2457 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2458 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2459 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2460 						rec_index, true);
2461 		break;
2462 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2463 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2464 						rec_index, false);
2465 		break;
2466 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2467 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2468 						    rec_index, true);
2469 		break;
2470 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2471 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2472 						    rec_index, false);
2473 		break;
2474 	}
2475 }
2476 
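/* FDB notifications are polled: a delayed work queries the SFN register
 * every fdb_notify.interval milliseconds and processes the returned records
 * under RTNL.
 */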
2477 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2478 {
2479 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2480 
2481 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2482 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2483 }
2484 
2485 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2486 {
2487 	struct mlxsw_sp_bridge *bridge;
2488 	struct mlxsw_sp *mlxsw_sp;
2489 	char *sfn_pl;
2490 	u8 num_rec;
2491 	int i;
2492 	int err;
2493 
2494 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2495 	if (!sfn_pl)
2496 		return;
2497 
2498 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2499 	mlxsw_sp = bridge->mlxsw_sp;
2500 
2501 	rtnl_lock();
2502 	mlxsw_reg_sfn_pack(sfn_pl);
2503 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2504 	if (err) {
2505 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2506 		goto out;
2507 	}
2508 	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2509 	for (i = 0; i < num_rec; i++)
2510 		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2511 
2512 out:
2513 	rtnl_unlock();
2514 	kfree(sfn_pl);
2515 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2516 }
2517 
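/* FDB events are received in atomic context (see mlxsw_sp_switchdev_event()
 * below), so the relevant information is copied into a work item and
 * processed later under RTNL.
 */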
2518 struct mlxsw_sp_switchdev_event_work {
2519 	struct work_struct work;
2520 	union {
2521 		struct switchdev_notifier_fdb_info fdb_info;
2522 		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2523 	};
2524 	struct net_device *dev;
2525 	unsigned long event;
2526 };
2527 
2528 static void
2529 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2530 				      enum mlxsw_sp_l3proto *proto,
2531 				      union mlxsw_sp_l3addr *addr)
2532 {
2533 	if (vxlan_addr->sa.sa_family == AF_INET) {
2534 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2535 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2536 	} else {
2537 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2538 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2539 	}
2540 }
2541 
2542 static void
2543 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2544 					  struct mlxsw_sp_switchdev_event_work *
2545 					  switchdev_work,
2546 					  struct mlxsw_sp_fid *fid, __be32 vni)
2547 {
2548 	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2549 	struct switchdev_notifier_fdb_info *fdb_info;
2550 	struct net_device *dev = switchdev_work->dev;
2551 	enum mlxsw_sp_l3proto proto;
2552 	union mlxsw_sp_l3addr addr;
2553 	int err;
2554 
2555 	fdb_info = &switchdev_work->fdb_info;
2556 	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2557 	if (err)
2558 		return;
2559 
2560 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2561 					      &proto, &addr);
2562 
2563 	switch (switchdev_work->event) {
2564 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2565 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2566 						     vxlan_fdb_info.eth_addr,
2567 						     mlxsw_sp_fid_index(fid),
2568 						     proto, &addr, true, false);
2569 		if (err)
2570 			return;
2571 		vxlan_fdb_info.offloaded = true;
2572 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2573 					 &vxlan_fdb_info.info);
2574 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2575 					    vxlan_fdb_info.eth_addr,
2576 					    fdb_info->vid, dev, true);
2577 		break;
2578 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2579 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2580 						     vxlan_fdb_info.eth_addr,
2581 						     mlxsw_sp_fid_index(fid),
2582 						     proto, &addr, false,
2583 						     false);
2584 		vxlan_fdb_info.offloaded = false;
2585 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2586 					 &vxlan_fdb_info.info);
2587 		break;
2588 	}
2589 }
2590 
2591 static void
2592 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2593 					switchdev_work)
2594 {
2595 	struct mlxsw_sp_bridge_device *bridge_device;
2596 	struct net_device *dev = switchdev_work->dev;
2597 	struct net_device *br_dev;
2598 	struct mlxsw_sp *mlxsw_sp;
2599 	struct mlxsw_sp_fid *fid;
2600 	__be32 vni;
2601 	int err;
2602 
2603 	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2604 	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
2605 		return;
2606 
2607 	if (!switchdev_work->fdb_info.added_by_user)
2608 		return;
2609 
2610 	if (!netif_running(dev))
2611 		return;
2612 	br_dev = netdev_master_upper_dev_get(dev);
2613 	if (!br_dev)
2614 		return;
2615 	if (!netif_is_bridge_master(br_dev))
2616 		return;
2617 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2618 	if (!mlxsw_sp)
2619 		return;
2620 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2621 	if (!bridge_device)
2622 		return;
2623 
2624 	fid = bridge_device->ops->fid_lookup(bridge_device,
2625 					     switchdev_work->fdb_info.vid);
2626 	if (!fid)
2627 		return;
2628 
2629 	err = mlxsw_sp_fid_vni(fid, &vni);
2630 	if (err)
2631 		goto out;
2632 
2633 	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2634 						  vni);
2635 
2636 out:
2637 	mlxsw_sp_fid_put(fid);
2638 }
2639 
2640 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2641 {
2642 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2643 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2644 	struct net_device *dev = switchdev_work->dev;
2645 	struct switchdev_notifier_fdb_info *fdb_info;
2646 	struct mlxsw_sp_port *mlxsw_sp_port;
2647 	int err;
2648 
2649 	rtnl_lock();
2650 	if (netif_is_vxlan(dev)) {
2651 		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2652 		goto out;
2653 	}
2654 
2655 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2656 	if (!mlxsw_sp_port)
2657 		goto out;
2658 
2659 	switch (switchdev_work->event) {
2660 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2661 		fdb_info = &switchdev_work->fdb_info;
2662 		if (!fdb_info->added_by_user)
2663 			break;
2664 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2665 		if (err)
2666 			break;
2667 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2668 					    fdb_info->addr,
2669 					    fdb_info->vid, dev, true);
2670 		break;
2671 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2672 		fdb_info = &switchdev_work->fdb_info;
2673 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2674 		break;
2675 	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2676 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2677 		/* These events are only used to potentially update an existing
2678 		 * SPAN mirror.
2679 		 */
2680 		break;
2681 	}
2682 
2683 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2684 
2685 out:
2686 	rtnl_unlock();
2687 	kfree(switchdev_work->fdb_info.addr);
2688 	kfree(switchdev_work);
2689 	dev_put(dev);
2690 }
2691 
2692 static void
2693 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2694 				 struct mlxsw_sp_switchdev_event_work *
2695 				 switchdev_work)
2696 {
2697 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2698 	struct mlxsw_sp_bridge_device *bridge_device;
2699 	struct net_device *dev = switchdev_work->dev;
2700 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
2701 	enum mlxsw_sp_l3proto proto;
2702 	union mlxsw_sp_l3addr addr;
2703 	struct net_device *br_dev;
2704 	struct mlxsw_sp_fid *fid;
2705 	u16 vid;
2706 	int err;
2707 
2708 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2709 	br_dev = netdev_master_upper_dev_get(dev);
2710 
2711 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2712 	if (!bridge_device)
2713 		return;
2714 
2715 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2716 	if (!fid)
2717 		return;
2718 
2719 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
2720 					      &proto, &addr);
2721 
2722 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2723 		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2724 		if (err) {
2725 			mlxsw_sp_fid_put(fid);
2726 			return;
2727 		}
2728 		vxlan_fdb_info->offloaded = true;
2729 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2730 					 &vxlan_fdb_info->info);
2731 		mlxsw_sp_fid_put(fid);
2732 		return;
2733 	}
2734 
2735 	/* The device has a single FDB table, whereas Linux has two - one
2736 	 * in the bridge driver and another in the VxLAN driver. We only
2737 	 * program an entry to the device if the MAC points to the VxLAN
2738 	 * device in the bridge's FDB table.
2739 	 */
2740 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
2741 	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
2742 		goto err_br_fdb_find;
2743 
2744 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2745 					     mlxsw_sp_fid_index(fid), proto,
2746 					     &addr, true, false);
2747 	if (err)
2748 		goto err_fdb_tunnel_uc_op;
2749 	vxlan_fdb_info->offloaded = true;
2750 	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2751 				 &vxlan_fdb_info->info);
2752 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2753 				    vxlan_fdb_info->eth_addr, vid, dev, true);
2754 
2755 	mlxsw_sp_fid_put(fid);
2756 
2757 	return;
2758 
2759 err_fdb_tunnel_uc_op:
2760 err_br_fdb_find:
2761 	mlxsw_sp_fid_put(fid);
2762 }
2763 
2764 static void
2765 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
2766 				 struct mlxsw_sp_switchdev_event_work *
2767 				 switchdev_work)
2768 {
2769 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2770 	struct mlxsw_sp_bridge_device *bridge_device;
2771 	struct net_device *dev = switchdev_work->dev;
2772 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
2773 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
2774 	enum mlxsw_sp_l3proto proto;
2775 	union mlxsw_sp_l3addr addr;
2776 	struct mlxsw_sp_fid *fid;
2777 	u16 vid;
2778 
2779 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2780 
2781 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2782 	if (!bridge_device)
2783 		return;
2784 
2785 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2786 	if (!fid)
2787 		return;
2788 
2789 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
2790 					      &proto, &addr);
2791 
2792 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2793 		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
2794 		mlxsw_sp_fid_put(fid);
2795 		return;
2796 	}
2797 
2798 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2799 				       mlxsw_sp_fid_index(fid), proto, &addr,
2800 				       false, false);
2801 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
2802 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2803 				    vxlan_fdb_info->eth_addr, vid, dev, false);
2804 
2805 	mlxsw_sp_fid_put(fid);
2806 }
2807 
2808 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
2809 {
2810 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2811 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2812 	struct net_device *dev = switchdev_work->dev;
2813 	struct mlxsw_sp *mlxsw_sp;
2814 	struct net_device *br_dev;
2815 
2816 	rtnl_lock();
2817 
2818 	if (!netif_running(dev))
2819 		goto out;
2820 	br_dev = netdev_master_upper_dev_get(dev);
2821 	if (!br_dev)
2822 		goto out;
2823 	if (!netif_is_bridge_master(br_dev))
2824 		goto out;
2825 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2826 	if (!mlxsw_sp)
2827 		goto out;
2828 
2829 	switch (switchdev_work->event) {
2830 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
2831 		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
2832 		break;
2833 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
2834 		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
2835 		break;
2836 	}
2837 
2838 out:
2839 	rtnl_unlock();
2840 	kfree(switchdev_work);
2841 	dev_put(dev);
2842 }
2843 
2844 static int
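/* Only VxLAN FDB entries that match the device's configured destination port
 * and VNI, have a unicast MAC and remote IP, and do not specify an egress
 * ifindex can be offloaded. Anything else is rejected before the work item
 * is scheduled.
 */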
2845 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
2846 				      switchdev_work,
2847 				      struct switchdev_notifier_info *info)
2848 {
2849 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
2850 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2851 	struct vxlan_config *cfg = &vxlan->cfg;
2852 
2853 	vxlan_fdb_info = container_of(info,
2854 				      struct switchdev_notifier_vxlan_fdb_info,
2855 				      info);
2856 
2857 	if (vxlan_fdb_info->remote_port != cfg->dst_port)
2858 		return -EOPNOTSUPP;
2859 	if (vxlan_fdb_info->remote_vni != cfg->vni)
2860 		return -EOPNOTSUPP;
2861 	if (vxlan_fdb_info->vni != cfg->vni)
2862 		return -EOPNOTSUPP;
2863 	if (vxlan_fdb_info->remote_ifindex)
2864 		return -EOPNOTSUPP;
2865 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
2866 		return -EOPNOTSUPP;
2867 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
2868 		return -EOPNOTSUPP;
2869 
2870 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
2871 
2872 	return 0;
2873 }
2874 
2875 /* Called under rcu_read_lock() */
2876 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
2877 				    unsigned long event, void *ptr)
2878 {
2879 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2880 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
2881 	struct switchdev_notifier_fdb_info *fdb_info;
2882 	struct switchdev_notifier_info *info = ptr;
2883 	struct net_device *br_dev;
2884 	int err;
2885 
2886 	/* Tunnel devices are not our uppers, so check their master instead */
2887 	br_dev = netdev_master_upper_dev_get_rcu(dev);
2888 	if (!br_dev)
2889 		return NOTIFY_DONE;
2890 	if (!netif_is_bridge_master(br_dev))
2891 		return NOTIFY_DONE;
2892 	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
2893 		return NOTIFY_DONE;
2894 
2895 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2896 	if (!switchdev_work)
2897 		return NOTIFY_BAD;
2898 
2899 	switchdev_work->dev = dev;
2900 	switchdev_work->event = event;
2901 
2902 	switch (event) {
2903 	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
2904 	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
2905 	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2906 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2907 		fdb_info = container_of(info,
2908 					struct switchdev_notifier_fdb_info,
2909 					info);
2910 		INIT_WORK(&switchdev_work->work,
2911 			  mlxsw_sp_switchdev_bridge_fdb_event_work);
2912 		memcpy(&switchdev_work->fdb_info, ptr,
2913 		       sizeof(switchdev_work->fdb_info));
2914 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2915 		if (!switchdev_work->fdb_info.addr)
2916 			goto err_addr_alloc;
2917 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2918 				fdb_info->addr);
2919 		/* Take a reference on the device. This can be either an
2920 		 * upper device containing mlxsw_sp_port or just a
2921 		 * mlxsw_sp_port.
2922 		 */
2923 		dev_hold(dev);
2924 		break;
2925 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
2926 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
2927 		INIT_WORK(&switchdev_work->work,
2928 			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
2929 		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
2930 							    info);
2931 		if (err)
2932 			goto err_vxlan_work_prepare;
2933 		dev_hold(dev);
2934 		break;
2935 	default:
2936 		kfree(switchdev_work);
2937 		return NOTIFY_DONE;
2938 	}
2939 
2940 	mlxsw_core_schedule_work(&switchdev_work->work);
2941 
2942 	return NOTIFY_DONE;
2943 
2944 err_vxlan_work_prepare:
2945 err_addr_alloc:
2946 	kfree(switchdev_work);
2947 	return NOTIFY_BAD;
2948 }
2949 
2950 static struct notifier_block mlxsw_sp_switchdev_notifier = {
2951 	.notifier_call = mlxsw_sp_switchdev_event,
2952 };
2953 
2954 u8
2955 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
2956 {
2957 	return bridge_port->stp_state;
2958 }
2959 
2960 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
2961 {
2962 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2963 	int err;
2964 
2965 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
2966 	if (err) {
2967 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
2968 		return err;
2969 	}
2970 
2971 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2972 	if (err) {
2973 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
2974 		return err;
2975 	}
2976 
2977 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
2978 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
2979 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2980 	return 0;
2981 }
2982 
2983 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
2984 {
2985 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
2986 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2987 
2988 }
2989 
2990 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
2991 {
2992 	struct mlxsw_sp_bridge *bridge;
2993 
2994 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
2995 	if (!bridge)
2996 		return -ENOMEM;
2997 	mlxsw_sp->bridge = bridge;
2998 	bridge->mlxsw_sp = mlxsw_sp;
2999 
3000 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3001 
3002 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3003 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3004 
3005 	return mlxsw_sp_fdb_init(mlxsw_sp);
3006 }
3007 
3008 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3009 {
3010 	mlxsw_sp_fdb_fini(mlxsw_sp);
3011 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3012 	kfree(mlxsw_sp->bridge);
3013 }
3014 
3015 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
3016 {
3017 	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
3018 }
3019 
3020 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
3021 {
3022 }
3023