// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

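/* Deferred context for an FDB event: the notifier data is copied here
 * and processed later on the ordered workqueue, since the atomic
 * switchdev notifier must not sleep.
 */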
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

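/* Only the unicast, multicast and broadcast flood flags can be offloaded */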
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

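/* Apply the requested flood settings to the corresponding PGID entries */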
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	int pgid;

	if (flags.mask & BR_MCAST_FLOOD)
		for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
			sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}

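/* Map the STP state onto the bridge forwarding and learning port masks
 * and apply the result to all ports: forwarding also enables learning,
 * while blocking states clear both masks.
 */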
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

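/* The bridge ageing time arrives as clock_t; convert it to msecs for
 * the hardware configuration.
 */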
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when the default_pvid is 0, to avoid
		 * collision with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

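/* Attach the port to the hardware bridge: only a single bridge device
 * is supported, so joining a second bridge is rejected. The standalone
 * host address is removed and the multicast list unsynced, as the
 * bridge now decides what the CPU should receive.
 */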
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) {
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	} else if (sparx5->hw_bridge_dev != bridge) {
		/* Adding the port to a second bridge is unsupported */
		return -ENODEV;
	}

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port is now bridged, therefore multicast frames need not be
	 * copied to the CPU unless the bridge requests them
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

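/* Detach the port from the hardware bridge: clear the bridge and VLAN
 * state, restore CPU forwarding of the port address, and re-sync the
 * multicast list now that the port is standalone again.
 */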
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

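/* Learn or forget the port MAC address towards the CPU, depending on
 * whether the interface is coming up or going down.
 */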
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

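/* Deferred FDB add/del handler; runs in process context under the RTNL
 * lock. Addresses on foreign interfaces are treated as host addresses
 * and learned towards the CPU port.
 */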
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when the default_pvid is 0, to avoid
	 * collision with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

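/* Atomic switchdev notifier: port attributes are handled directly,
 * while FDB add/del events are copied and deferred to the ordered
 * workqueue. A reference to the netdevice is held until the work item
 * has run.
 */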
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

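/* Add a VLAN on a port, or, when the bridge itself is the target,
 * make the broadcast address known to the CPU for that VLAN.
 */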
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

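/* Add a port, or the CPU for host MDB entries, to a multicast group:
 * reuse the PGID of an existing MAC table entry, or allocate a new
 * multicast PGID and learn the group address.
 */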
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry;
	bool is_host;
	int res, err;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the VLAN tag is not parsed and
	 * we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;

		if (is_host)
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, true);
	} else {
		err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
		if (err) {
			netdev_warn(dev, "multicast pgid table full\n");
			return err;
		}

		if (is_host)
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, true);

		err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
		if (err) {
			netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
			sparx5_pgid_free(spx5, pgid_idx);
			sparx5_pgid_update_mask(port, pgid_idx, false);
			return err;
		}
	}

	return 0;
}

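/* Forget a multicast MAC table entry and release its PGID entry */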
static int sparx5_mdb_del_entry(struct net_device *dev,
				struct sparx5 *spx5,
				const unsigned char mac[ETH_ALEN],
				const u16 vid,
				u16 pgid_idx)
{
	int err;

	err = sparx5_mact_forget(spx5, mac, vid);
	if (err) {
		netdev_warn(dev, "could not forget mac address %pM\n", mac);
		return err;
	}
	err = sparx5_pgid_free(spx5, pgid_idx);
	if (err) {
		netdev_err(dev, "attempted to free already freed pgid\n");
		return err;
	}
	return 0;
}

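/* Remove a port, or the CPU, from a multicast group and drop the MAC
 * table entry once neither ports nor the CPU reference it.
 */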
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u32 mact_entry, pgid_entry[3], misc_cfg;
	u16 pgid_idx, vid;
	bool host_ena;
	int res;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;

		if (netif_is_bridge_master(v->obj.orig_dev))
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, false);

		misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
		host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);

		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
			/* No ports or CPU are in MC group. Remove entry */
			return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
	}

	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

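/* Delete a VLAN from a port, or forget the per-VLAN broadcast address
 * when the bridge itself is the target.
 */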
static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

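/* Blocking switchdev notifier: handles port object add/del and port
 * attribute set in process context.
 */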
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

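/* Register the netdevice and switchdev notifiers and create the
 * ordered workqueue used to defer FDB events.
 */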
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		/* Unwind the blocking notifier as well, which the old label
		 * chain left registered on this path
		 */
		goto err_owq;
	}

	return 0;

err_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

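/* Tear everything down in reverse order of registration */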
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}