// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "lan966x_main.h"

static struct notifier_block lan966x_netdevice_nb __read_mostly;

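/* Helper for the IPv4/IPv6 multicast flood masks (PGID_MCIPV4/PGID_MCIPV6).
 * While multicast snooping is disabled, the port's bit simply mirrors the
 * generic multicast flood mask (PGID_MC); with snooping enabled the bit is
 * cleared, presumably so that IP multicast is steered by the MDB entries
 * instead of being flooded.
 */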
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
					    u32 pgid_ip)
{
	struct lan966x *lan966x = port->lan966x;
	u32 flood_mask_ip;

	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);

	/* If mcast snooping is not enabled, then use the mcast flood mask
	 * to decide whether to enable multicast flooding or not.
	 */
	if (!port->mcast_ena) {
		u32 flood_mask;

		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
		flood_mask = ANA_PGID_PGID_GET(flood_mask);

		if (flood_mask & BIT(port->chip_port))
			flood_mask_ip |= BIT(port->chip_port);
		else
			flood_mask_ip &= ~BIT(port->chip_port);
	} else {
		flood_mask_ip &= ~BIT(port->chip_port);
	}

	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(pgid_ip));
}

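/* Set or clear this port's bit in the generic multicast flood mask
 * (PGID_MC) and, while snooping is disabled, keep the IPv4/IPv6 flood
 * masks in sync with it.
 */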
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_MC));

	if (!port->mcast_ena) {
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
	}
}

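/* The unknown-unicast and broadcast flood setters below follow the same
 * pattern: read the PGID flood mask, update this port's bit and write it
 * back.
 */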
static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_UC));
}

static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
					 bool enabled)
{
	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));

	val = ANA_PGID_PGID_GET(val);
	if (enabled)
		val |= BIT(port->chip_port);
	else
		val &= ~BIT(port->chip_port);

	lan_rmw(ANA_PGID_PGID_SET(val),
		ANA_PGID_PGID,
		port->lan966x, ANA_PGID(PGID_BC));
}

static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
{
	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
		ANA_PORT_CFG_LEARN_ENA,
		port->lan966x, ANA_PORT_CFG(port->chip_port));

	port->learn_ena = enabled;
}

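/* Apply the offloaded bridge port flags: flood controls and address
 * learning. Only the bits set in flags.mask are modified.
 */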
static void lan966x_port_bridge_flags(struct lan966x_port *port,
				      struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		lan966x_port_set_mcast_flood(port,
					     !!(flags.val & BR_MCAST_FLOOD));

	if (flags.mask & BR_FLOOD)
		lan966x_port_set_ucast_flood(port,
					     !!(flags.val & BR_FLOOD));

	if (flags.mask & BR_BCAST_FLOOD)
		lan966x_port_set_bcast_flood(port,
					     !!(flags.val & BR_BCAST_FLOOD));

	if (flags.mask & BR_LEARNING)
		lan966x_port_set_learning(port,
					  !!(flags.val & BR_LEARNING));
}

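/* Reject bridge port flags that the hardware cannot offload. */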
static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
					 struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
			   BR_LEARNING))
		return -EINVAL;

	return 0;
}

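/* Recompute the per-source-port forwarding masks (PGID_SRC + port). A
 * bridged port in the forwarding state may reach every other forwarding
 * port except itself and, when it is part of a bond, except the other
 * ports of the same LAG. The CPU port is always included.
 */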
void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
	int i;

	for (i = 0; i < lan966x->num_phys_ports; i++) {
		struct lan966x_port *port = lan966x->ports[i];
		unsigned long mask = 0;

		if (port && lan966x->bridge_fwd_mask & BIT(i)) {
			mask = lan966x->bridge_fwd_mask & ~BIT(i);

			if (port->bond)
				mask &= ~lan966x_lag_get_mask(lan966x,
							      port->bond);
		}

		mask |= BIT(CPU_PORT);

		lan_wr(ANA_PGID_PGID_SET(mask),
		       lan966x, ANA_PGID(PGID_SRC + i));
	}
}

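/* Translate the bridge STP state into hardware settings: learning is
 * active only in the LEARNING/FORWARDING states (and only if the user
 * enabled it), and the port contributes to the forwarding masks only
 * while in the FORWARDING state.
 */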
void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
	struct lan966x *lan966x = port->lan966x;
	bool learn_ena = false;

	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
	    port->learn_ena)
		learn_ena = true;

	if (state == BR_STATE_FORWARDING)
		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
	else
		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);

	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
		ANA_PORT_CFG_LEARN_ENA,
		lan966x, ANA_PORT_CFG(port->chip_port));

	lan966x_update_fwd_mask(lan966x);
}

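/* The bridge ageing time arrives in clock_t units; convert it to seconds
 * before programming the MAC table.
 */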
void lan966x_port_ageing_set(struct lan966x_port *port,
			     unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	lan966x_mac_set_ageing(port->lan966x, ageing_time);
}

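/* Enable or disable multicast snooping for the port: redirect IGMP/MLD
 * control frames to the CPU, restore or clear the offloaded MDB entries
 * and re-evaluate the IP multicast flood masks.
 */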
static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
{
	struct lan966x *lan966x = port->lan966x;

	port->mcast_ena = mcast_ena;
	if (mcast_ena)
		lan966x_mdb_restore_entries(lan966x);
	else
		lan966x_mdb_clear_entries(lan966x);

	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
		lan966x, ANA_CPU_FWD_CFG(port->chip_port));

	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
}

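/* switchdev attribute handler: bridge port flags, STP state, ageing time,
 * VLAN filtering and multicast snooping. Anything else is reported as
 * -EOPNOTSUPP.
 */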
static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = 0;

	if (ctx && ctx != port)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		lan966x_port_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		lan966x_port_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		lan966x_port_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
		lan966x_vlan_port_apply(port);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		lan966x_port_mc_set(port, !attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

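/* Only one bridge per switch instance is supported: the first port to
 * join records the bridge, later ports must join the same one. On a
 * successful join the port is offloaded and its flood/learning flags are
 * initialized to the bridge defaults.
 */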
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *brport_dev,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Adding the port to a different bridge is not allowed");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(brport_dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}

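/* Undo lan966x_port_bridge_join(): re-enable flooding, disable learning
 * and return the port to its standalone (host) VLAN configuration.
 */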
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
	lan966x_vlan_port_rew_host(port);
}

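/* NETDEV_CHANGEUPPER handler for lan966x ports: join or leave a bridge
 * or a LAG depending on the type of the upper device.
 */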
int lan966x_port_changeupper(struct net_device *dev,
			     struct net_device *brport_dev,
			     struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_port_bridge_join(port, brport_dev,
						       info->upper_dev,
						       extack);
		else
			lan966x_port_bridge_leave(port, info->upper_dev);
	}

	if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking)
			err = lan966x_lag_port_join(port, info->upper_dev,
						    info->upper_dev,
						    extack);
		else
			lan966x_lag_port_leave(port, info->upper_dev);
	}

	return err;
}

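/* NETDEV_PRECHANGEUPPER handler: when a port is about to leave a bridge
 * or a LAG, unoffload the bridge port and flush the FDB workqueue before
 * the unlinking takes place.
 */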
int lan966x_port_prechangeupper(struct net_device *dev,
				struct net_device *brport_dev,
				struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
		switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
		lan966x_fdb_flush_workqueue(port->lan966x);
	}

	if (netif_is_lag_master(info->upper_dev)) {
		err = lan966x_lag_port_prechangeupper(dev, info);
		if (err || info->linking)
			return err;

		switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
		lan966x_fdb_flush_workqueue(port->lan966x);
	}

	return err;
}

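/* Walk the lower devices of a bridge or bond and reject configurations
 * that mix lan966x ports with ports of another lan966x instance or with
 * foreign interfaces, since those cannot be offloaded. The check recurses
 * into LAG uppers.
 */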
static int lan966x_foreign_bridging_check(struct net_device *upper,
					  bool *has_foreign,
					  bool *seen_lan966x,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(upper) &&
	    !netif_is_lag_master(upper))
		return 0;

	netdev_for_each_lower_dev(upper, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* Upper already has at least one port of a
				 * lan966x switch inside it, check that it's
				 * the same instance of the driver.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * upper device
				 */
				lan966x = port->lan966x;
				*seen_lan966x = true;
			}
		} else if (netif_is_lag_master(dev)) {
			/* Allow bond interfaces that contain only lan966x
			 * devices
			 */
			if (lan966x_foreign_bridging_check(dev, has_foreign,
							   seen_lan966x,
							   extack))
				return -EINVAL;
		} else {
			*has_foreign = true;
		}

		if (*seen_lan966x && *has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}

static int lan966x_bridge_check(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	bool has_foreign = false;
	bool seen_lan966x = false;

	return lan966x_foreign_bridging_check(info->upper_dev,
					      &has_foreign,
					      &seen_lan966x,
					      info->info.extack);
}

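/* Dispatch netdevice notifier events. For non-lan966x devices (bridges,
 * bonds) only the bridging topology is validated and LAG events are
 * forwarded to the LAG code; events on lan966x ports are handled by the
 * changeupper/prechangeupper/changelowerstate helpers above.
 */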
static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		switch (event) {
		case NETDEV_CHANGEUPPER:
		case NETDEV_PRECHANGEUPPER:
			err = lan966x_bridge_check(dev, ptr);
			if (err)
				return err;

			if (netif_is_lag_master(dev)) {
				if (event == NETDEV_CHANGEUPPER)
					err = lan966x_lag_netdev_changeupper(dev,
									     ptr);
				else
					err = lan966x_lag_netdev_prechangeupper(dev,
										ptr);

				return err;
			}
			break;
		default:
			return 0;
		}

		return 0;
	}

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, dev, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		err = lan966x_lag_port_changelowerstate(dev, ptr);
		break;
	}

	return err;
}

static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

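/* Tell the switchdev FDB helper whether a device is foreign to this
 * switch: the offloaded bridge and any bond containing one of our ports
 * are not considered foreign.
 */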
static bool lan966x_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	int i;

	if (netif_is_bridge_master(foreign_dev))
		if (lan966x->bridge == foreign_dev)
			return false;

	if (netif_is_lag_master(foreign_dev))
		for (i = 0; i < lan966x->num_phys_ports; ++i)
			if (lan966x->ports[i] &&
			    lan966x->ports[i]->bond == foreign_dev)
				return false;

	return true;
}

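/* Atomic switchdev notifier: port attribute changes and FDB add/del
 * events, filtered through lan966x_netdevice_check() and
 * lan966x_foreign_dev_check().
 */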
static int lan966x_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   lan966x_netdevice_check,
							   lan966x_foreign_dev_check,
							   lan966x_handle_fdb);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

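/* VLAN object handlers: VLANs whose orig_dev is a bridge port are
 * programmed on that port, while VLANs targeting the bridge itself are
 * applied to the CPU port.
 */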
static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_add_vlan(port, v->vid,
					   v->flags & BRIDGE_VLAN_INFO_PVID,
					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	else
		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj,
				       struct netlink_ext_ack *extack)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_add(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_add(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
					const struct switchdev_obj *obj)
{
	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct lan966x *lan966x = port->lan966x;

	if (!netif_is_bridge_master(obj->orig_dev))
		lan966x_vlan_port_del_vlan(port, v->vid);
	else
		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);

	return 0;
}

static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
				       const struct switchdev_obj *obj)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err;

	if (ctx && ctx != port)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = lan966x_handle_port_vlan_del(port, obj);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = lan966x_handle_port_mdb_del(port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event,
					    void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    lan966x_netdevice_check,
						    lan966x_handle_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     lan966x_netdevice_check,
						     lan966x_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};

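/* Register/unregister the netdevice and switchdev notifier blocks
 * defined above.
 */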
void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}

void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}