1 // SPDX-License-Identifier: GPL-2.0+
2 
3 #include <linux/if_bridge.h>
4 #include <net/switchdev.h>
5 
6 #include "lan966x_main.h"
7 
8 static struct notifier_block lan966x_netdevice_nb __read_mostly;
9 
10 static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
11 					    u32 pgid_ip)
12 {
13 	struct lan966x *lan966x = port->lan966x;
14 	u32 flood_mask_ip;
15 
16 	flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
17 	flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
18 
19 	/* If mcast snooping is not enabled then use mcast flood mask
20 	 * to decide to enable multicast flooding or not.
21 	 */
22 	if (!port->mcast_ena) {
23 		u32 flood_mask;
24 
25 		flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
26 		flood_mask = ANA_PGID_PGID_GET(flood_mask);
27 
28 		if (flood_mask & BIT(port->chip_port))
29 			flood_mask_ip |= BIT(port->chip_port);
30 		else
31 			flood_mask_ip &= ~BIT(port->chip_port);
32 	} else {
33 		flood_mask_ip &= ~BIT(port->chip_port);
34 	}
35 
36 	lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
37 		ANA_PGID_PGID,
38 		lan966x, ANA_PGID(pgid_ip));
39 }
40 
41 static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
42 					 bool enabled)
43 {
44 	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
45 
46 	val = ANA_PGID_PGID_GET(val);
47 	if (enabled)
48 		val |= BIT(port->chip_port);
49 	else
50 		val &= ~BIT(port->chip_port);
51 
52 	lan_rmw(ANA_PGID_PGID_SET(val),
53 		ANA_PGID_PGID,
54 		port->lan966x, ANA_PGID(PGID_MC));
55 
56 	if (!port->mcast_ena) {
57 		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
58 		lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
59 	}
60 }
61 
62 static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
63 					 bool enabled)
64 {
65 	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
66 
67 	val = ANA_PGID_PGID_GET(val);
68 	if (enabled)
69 		val |= BIT(port->chip_port);
70 	else
71 		val &= ~BIT(port->chip_port);
72 
73 	lan_rmw(ANA_PGID_PGID_SET(val),
74 		ANA_PGID_PGID,
75 		port->lan966x, ANA_PGID(PGID_UC));
76 }
77 
78 static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
79 					 bool enabled)
80 {
81 	u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
82 
83 	val = ANA_PGID_PGID_GET(val);
84 	if (enabled)
85 		val |= BIT(port->chip_port);
86 	else
87 		val &= ~BIT(port->chip_port);
88 
89 	lan_rmw(ANA_PGID_PGID_SET(val),
90 		ANA_PGID_PGID,
91 		port->lan966x, ANA_PGID(PGID_BC));
92 }
93 
94 static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
95 {
96 	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
97 		ANA_PORT_CFG_LEARN_ENA,
98 		port->lan966x, ANA_PORT_CFG(port->chip_port));
99 
100 	port->learn_ena = enabled;
101 }
102 
103 static void lan966x_port_bridge_flags(struct lan966x_port *port,
104 				      struct switchdev_brport_flags flags)
105 {
106 	if (flags.mask & BR_MCAST_FLOOD)
107 		lan966x_port_set_mcast_flood(port,
108 					     !!(flags.val & BR_MCAST_FLOOD));
109 
110 	if (flags.mask & BR_FLOOD)
111 		lan966x_port_set_ucast_flood(port,
112 					     !!(flags.val & BR_FLOOD));
113 
114 	if (flags.mask & BR_BCAST_FLOOD)
115 		lan966x_port_set_bcast_flood(port,
116 					     !!(flags.val & BR_BCAST_FLOOD));
117 
118 	if (flags.mask & BR_LEARNING)
119 		lan966x_port_set_learning(port,
120 					  !!(flags.val & BR_LEARNING));
121 }
122 
123 static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
124 					 struct switchdev_brport_flags flags)
125 {
126 	if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
127 			   BR_LEARNING))
128 		return -EINVAL;
129 
130 	return 0;
131 }
132 
133 void lan966x_update_fwd_mask(struct lan966x *lan966x)
134 {
135 	int i;
136 
137 	for (i = 0; i < lan966x->num_phys_ports; i++) {
138 		struct lan966x_port *port = lan966x->ports[i];
139 		unsigned long mask = 0;
140 
141 		if (port && lan966x->bridge_fwd_mask & BIT(i)) {
142 			mask = lan966x->bridge_fwd_mask & ~BIT(i);
143 
144 			if (port->bond)
145 				mask &= ~lan966x_lag_get_mask(lan966x,
146 							      port->bond);
147 		}
148 
149 		mask |= BIT(CPU_PORT);
150 
151 		lan_wr(ANA_PGID_PGID_SET(mask),
152 		       lan966x, ANA_PGID(PGID_SRC + i));
153 	}
154 }
155 
156 void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
157 {
158 	struct lan966x *lan966x = port->lan966x;
159 	bool learn_ena = false;
160 
161 	if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
162 	    port->learn_ena)
163 		learn_ena = true;
164 
165 	if (state == BR_STATE_FORWARDING)
166 		lan966x->bridge_fwd_mask |= BIT(port->chip_port);
167 	else
168 		lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
169 
170 	lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
171 		ANA_PORT_CFG_LEARN_ENA,
172 		lan966x, ANA_PORT_CFG(port->chip_port));
173 
174 	lan966x_update_fwd_mask(lan966x);
175 }
176 
177 void lan966x_port_ageing_set(struct lan966x_port *port,
178 			     unsigned long ageing_clock_t)
179 {
180 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
181 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
182 
183 	lan966x_mac_set_ageing(port->lan966x, ageing_time);
184 }
185 
/* Enable/disable multicast snooping handling for @port.
 *
 * Enabling restores the MDB entries into HW and redirects IGMP/MLD
 * control frames to the CPU; disabling clears the HW MDB entries so
 * flooding takes over.
 */
static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
{
	struct lan966x *lan966x = port->lan966x;

	/* Update the cached state first: the IP flood recomputation at
	 * the end of this function keys off port->mcast_ena.
	 */
	port->mcast_ena = mcast_ena;
	if (mcast_ena)
		lan966x_mdb_restore_entries(lan966x);
	else
		lan966x_mdb_clear_entries(lan966x);

	/* Redirect IGMP/MLD control traffic to the CPU only while
	 * snooping is enabled.
	 */
	lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
		ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
		ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
		ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
		lan966x, ANA_CPU_FWD_CFG(port->chip_port));

	/* IP multicast flooding depends on the new mcast_ena value */
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
	lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
}
207 
208 static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
209 				 const struct switchdev_attr *attr,
210 				 struct netlink_ext_ack *extack)
211 {
212 	struct lan966x_port *port = netdev_priv(dev);
213 	int err = 0;
214 
215 	if (ctx && ctx != port)
216 		return 0;
217 
218 	switch (attr->id) {
219 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
220 		lan966x_port_bridge_flags(port, attr->u.brport_flags);
221 		break;
222 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
223 		err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
224 		break;
225 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
226 		lan966x_port_stp_state_set(port, attr->u.stp_state);
227 		break;
228 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
229 		lan966x_port_ageing_set(port, attr->u.ageing_time);
230 		break;
231 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
232 		lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
233 		lan966x_vlan_port_apply(port);
234 		break;
235 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
236 		lan966x_port_mc_set(port, !attr->u.mc_disabled);
237 		break;
238 	default:
239 		err = -EOPNOTSUPP;
240 		break;
241 	}
242 
243 	return err;
244 }
245 
/* Attach @port to @bridge.
 *
 * All ports of one lan966x instance must join the same bridge; joining
 * a different one is rejected with -ENODEV. On success the port is
 * marked offloaded and all supported bridge port flags are enabled.
 */
static int lan966x_port_bridge_join(struct lan966x_port *port,
				    struct net_device *brport_dev,
				    struct net_device *bridge,
				    struct netlink_ext_ack *extack)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	int err;

	/* The first member port records the bridge; later ports must
	 * join that same bridge.
	 */
	if (!lan966x->bridge_mask) {
		lan966x->bridge = bridge;
	} else {
		if (lan966x->bridge != bridge) {
			NL_SET_ERR_MSG_MOD(extack, "Not allow to add port to different bridge");
			return -ENODEV;
		}
	}

	err = switchdev_bridge_port_offload(brport_dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		return err;

	lan966x->bridge_mask |= BIT(port->chip_port);

	/* Start with learning and all flooding enabled */
	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;
	lan966x_port_bridge_flags(port, flags);

	return 0;
}
280 
/* Detach @port from @bridge and restore standalone port defaults. */
static void lan966x_port_bridge_leave(struct lan966x_port *port,
				      struct net_device *bridge)
{
	struct switchdev_brport_flags flags = {0};
	struct lan966x *lan966x = port->lan966x;

	/* Standalone: keep flooding on but turn learning off */
	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;
	lan966x_port_bridge_flags(port, flags);

	lan966x->bridge_mask &= ~BIT(port->chip_port);

	/* Forget the bridge once the last member port has left */
	if (!lan966x->bridge_mask)
		lan966x->bridge = NULL;

	/* Set the port back to host mode */
	lan966x_vlan_port_set_vlan_aware(port, false);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);
}
301 
302 int lan966x_port_changeupper(struct net_device *dev,
303 			     struct net_device *brport_dev,
304 			     struct netdev_notifier_changeupper_info *info)
305 {
306 	struct lan966x_port *port = netdev_priv(dev);
307 	struct netlink_ext_ack *extack;
308 	int err = 0;
309 
310 	extack = netdev_notifier_info_to_extack(&info->info);
311 
312 	if (netif_is_bridge_master(info->upper_dev)) {
313 		if (info->linking)
314 			err = lan966x_port_bridge_join(port, brport_dev,
315 						       info->upper_dev,
316 						       extack);
317 		else
318 			lan966x_port_bridge_leave(port, info->upper_dev);
319 	}
320 
321 	if (netif_is_lag_master(info->upper_dev)) {
322 		if (info->linking)
323 			err = lan966x_lag_port_join(port, info->upper_dev,
324 						    info->upper_dev,
325 						    extack);
326 		else
327 			lan966x_lag_port_leave(port, info->upper_dev);
328 	}
329 
330 	return err;
331 }
332 
/* Handle NETDEV_PRECHANGEUPPER for a lan966x front port.
 *
 * Unoffloads the bridge port before the upper link is actually removed
 * and flushes pending FDB work when leaving a bridge. For LAG uppers,
 * delegates to the LAG code first and only unoffloads the bond's bridge
 * port when the port is being unlinked.
 */
int lan966x_port_prechangeupper(struct net_device *dev,
				struct net_device *brport_dev,
				struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
		switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
		lan966x_fdb_flush_workqueue(port->lan966x);
	}

	if (netif_is_lag_master(info->upper_dev)) {
		err = lan966x_lag_port_prechangeupper(dev, info);
		if (err || info->linking)
			return err;

		/* Unlinking from the bond: unoffload via the bond dev */
		switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
	}

	return err;
}
355 
/* Recursively inspect the lower devices of @upper (a bridge or a bond).
 *
 * Sets *seen_lan966x once a lan966x port is found, and *has_foreign
 * once a lower that is neither a lan966x port nor a bond is found.
 * Returns -EINVAL when ports of different lan966x instances, or a
 * lan966x port and a foreign interface, would be bridged together;
 * 0 otherwise (including when @upper is neither a bridge nor a bond).
 */
static int lan966x_foreign_bridging_check(struct net_device *upper,
					  bool *has_foreign,
					  bool *seen_lan966x,
					  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = NULL;
	struct net_device *dev;
	struct list_head *iter;

	if (!netif_is_bridge_master(upper) &&
	    !netif_is_lag_master(upper))
		return 0;

	netdev_for_each_lower_dev(upper, dev, iter) {
		if (lan966x_netdevice_check(dev)) {
			struct lan966x_port *port = netdev_priv(dev);

			if (lan966x) {
				/* Upper already has at least one port of a
				 * lan966x switch inside it, check that it's
				 * the same instance of the driver.
				 */
				if (port->lan966x != lan966x) {
					NL_SET_ERR_MSG_MOD(extack,
							   "Bridging between multiple lan966x switches disallowed");
					return -EINVAL;
				}
			} else {
				/* This is the first lan966x port inside this
				 * upper device
				 */
				lan966x = port->lan966x;
				*seen_lan966x = true;
			}
		} else if (netif_is_lag_master(dev)) {
			/* Allow to have bond interfaces that have only lan966x
			 * devices
			 */
			if (lan966x_foreign_bridging_check(dev, has_foreign,
							   seen_lan966x,
							   extack))
				return -EINVAL;
		} else {
			*has_foreign = true;
		}

		/* Fail as soon as both kinds have been observed; the
		 * flags may have been set by a nested call as well.
		 */
		if (*seen_lan966x && *has_foreign) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Bridging lan966x ports with foreign interfaces disallowed");
			return -EINVAL;
		}
	}

	return 0;
}
411 
412 static int lan966x_bridge_check(struct net_device *dev,
413 				struct netdev_notifier_changeupper_info *info)
414 {
415 	bool has_foreign = false;
416 	bool seen_lan966x = false;
417 
418 	return lan966x_foreign_bridging_check(info->upper_dev,
419 					      &has_foreign,
420 					      &seen_lan966x,
421 					      info->info.extack);
422 }
423 
/* Dispatch a netdevice notifier event.
 *
 * Devices that are not lan966x ports are still inspected on
 * (PRE)CHANGEUPPER so that invalid bridging setups can be rejected and
 * bonds containing lan966x ports can be tracked; everything else about
 * foreign devices is ignored.
 */
static int lan966x_netdevice_port_event(struct net_device *dev,
					struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	int err = 0;

	if (!lan966x_netdevice_check(dev)) {
		switch (event) {
		case NETDEV_CHANGEUPPER:
		case NETDEV_PRECHANGEUPPER:
			/* Validate the topology even for foreign devices */
			err = lan966x_bridge_check(dev, ptr);
			if (err)
				return err;

			/* A bond (which may carry lan966x ports) is
			 * changing its own upper.
			 */
			if (netif_is_lag_master(dev)) {
				if (event == NETDEV_CHANGEUPPER)
					err = lan966x_lag_netdev_changeupper(dev,
									     ptr);
				else
					err = lan966x_lag_netdev_prechangeupper(dev,
										ptr);

				return err;
			}
			break;
		default:
			return 0;
		}

		return 0;
	}

	/* From here on, dev is a lan966x front port */
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		err = lan966x_port_prechangeupper(dev, dev, ptr);
		break;
	case NETDEV_CHANGEUPPER:
		err = lan966x_bridge_check(dev, ptr);
		if (err)
			return err;

		err = lan966x_port_changeupper(dev, dev, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		err = lan966x_lag_port_changelowerstate(dev, ptr);
		break;
	}

	return err;
}
474 
/* Netdevice notifier entry point: delegate to the port handler and
 * translate its errno result into a notifier return value.
 */
static int lan966x_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	err = lan966x_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(err);
}
485 
486 static bool lan966x_foreign_dev_check(const struct net_device *dev,
487 				      const struct net_device *foreign_dev)
488 {
489 	struct lan966x_port *port = netdev_priv(dev);
490 	struct lan966x *lan966x = port->lan966x;
491 	int i;
492 
493 	if (netif_is_bridge_master(foreign_dev))
494 		if (lan966x->bridge == foreign_dev)
495 			return false;
496 
497 	if (netif_is_lag_master(foreign_dev))
498 		for (i = 0; i < lan966x->num_phys_ports; ++i)
499 			if (lan966x->ports[i] &&
500 			    lan966x->ports[i]->bond == foreign_dev)
501 				return false;
502 
503 	return true;
504 }
505 
506 static int lan966x_switchdev_event(struct notifier_block *nb,
507 				   unsigned long event, void *ptr)
508 {
509 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
510 	int err;
511 
512 	switch (event) {
513 	case SWITCHDEV_PORT_ATTR_SET:
514 		err = switchdev_handle_port_attr_set(dev, ptr,
515 						     lan966x_netdevice_check,
516 						     lan966x_port_attr_set);
517 		return notifier_from_errno(err);
518 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
519 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
520 		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
521 							   lan966x_netdevice_check,
522 							   lan966x_foreign_dev_check,
523 							   lan966x_handle_fdb);
524 		return notifier_from_errno(err);
525 	}
526 
527 	return NOTIFY_DONE;
528 }
529 
530 static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
531 					const struct switchdev_obj *obj)
532 {
533 	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
534 	struct lan966x *lan966x = port->lan966x;
535 
536 	if (!netif_is_bridge_master(obj->orig_dev))
537 		lan966x_vlan_port_add_vlan(port, v->vid,
538 					   v->flags & BRIDGE_VLAN_INFO_PVID,
539 					   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
540 	else
541 		lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
542 
543 	return 0;
544 }
545 
546 static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
547 				       const struct switchdev_obj *obj,
548 				       struct netlink_ext_ack *extack)
549 {
550 	struct lan966x_port *port = netdev_priv(dev);
551 	int err;
552 
553 	if (ctx && ctx != port)
554 		return 0;
555 
556 	switch (obj->id) {
557 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
558 		err = lan966x_handle_port_vlan_add(port, obj);
559 		break;
560 	case SWITCHDEV_OBJ_ID_PORT_MDB:
561 	case SWITCHDEV_OBJ_ID_HOST_MDB:
562 		err = lan966x_handle_port_mdb_add(port, obj);
563 		break;
564 	default:
565 		err = -EOPNOTSUPP;
566 		break;
567 	}
568 
569 	return err;
570 }
571 
572 static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
573 					const struct switchdev_obj *obj)
574 {
575 	const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
576 	struct lan966x *lan966x = port->lan966x;
577 
578 	if (!netif_is_bridge_master(obj->orig_dev))
579 		lan966x_vlan_port_del_vlan(port, v->vid);
580 	else
581 		lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
582 
583 	return 0;
584 }
585 
586 static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
587 				       const struct switchdev_obj *obj)
588 {
589 	struct lan966x_port *port = netdev_priv(dev);
590 	int err;
591 
592 	if (ctx && ctx != port)
593 		return 0;
594 
595 	switch (obj->id) {
596 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
597 		err = lan966x_handle_port_vlan_del(port, obj);
598 		break;
599 	case SWITCHDEV_OBJ_ID_PORT_MDB:
600 	case SWITCHDEV_OBJ_ID_HOST_MDB:
601 		err = lan966x_handle_port_mdb_del(port, obj);
602 		break;
603 	default:
604 		err = -EOPNOTSUPP;
605 		break;
606 	}
607 
608 	return err;
609 }
610 
611 static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
612 					    unsigned long event,
613 					    void *ptr)
614 {
615 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
616 	int err;
617 
618 	switch (event) {
619 	case SWITCHDEV_PORT_OBJ_ADD:
620 		err = switchdev_handle_port_obj_add(dev, ptr,
621 						    lan966x_netdevice_check,
622 						    lan966x_handle_port_obj_add);
623 		return notifier_from_errno(err);
624 	case SWITCHDEV_PORT_OBJ_DEL:
625 		err = switchdev_handle_port_obj_del(dev, ptr,
626 						    lan966x_netdevice_check,
627 						    lan966x_handle_port_obj_del);
628 		return notifier_from_errno(err);
629 	case SWITCHDEV_PORT_ATTR_SET:
630 		err = switchdev_handle_port_attr_set(dev, ptr,
631 						     lan966x_netdevice_check,
632 						     lan966x_port_attr_set);
633 		return notifier_from_errno(err);
634 	}
635 
636 	return NOTIFY_DONE;
637 }
638 
/* Netdevice notifier: tracks bridge/LAG topology changes */
static struct notifier_block lan966x_netdevice_nb __read_mostly = {
	.notifier_call = lan966x_netdevice_event,
};

/* Atomic switchdev notifier: port attributes and FDB events */
struct notifier_block lan966x_switchdev_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_event,
};

/* Blocking switchdev notifier: VLAN/MDB objects and port attributes */
struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
	.notifier_call = lan966x_switchdev_blocking_event,
};
650 
/* Register all notifier blocks used for switchdev offload */
void lan966x_register_notifier_blocks(void)
{
	register_netdevice_notifier(&lan966x_netdevice_nb);
	register_switchdev_notifier(&lan966x_switchdev_nb);
	register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
}
657 
/* Unregister the notifier blocks, in reverse registration order */
void lan966x_unregister_notifier_blocks(void)
{
	unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
	unregister_switchdev_notifier(&lan966x_switchdev_nb);
	unregister_netdevice_notifier(&lan966x_netdevice_nb);
}
664