// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

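/* Return the fastest (smallest non-zero) ageing time configured on any port of
 * this switch, since a single hardware ageing timer must satisfy the most
 * demanding bridge.
 */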
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	int i;

	for (i = 0; i < ds->num_ports; ++i) {
		struct dsa_port *dp = dsa_to_port(ds, i);

		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;
	}

	return ageing_time;
}

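/* Check the requested ageing time against the limits advertised by the switch
 * and program the fastest ageing time currently in use across all ports.
 */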
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

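/* An MTU notifier matches the exactly targeted port and, unless it was
 * restricted to a single switch, the CPU and DSA ports of the fabric as well.
 */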
static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
				 struct dsa_notifier_mtu_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
		return true;

	return false;
}

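/* Apply the new MTU on every port of this switch that matches the notifier. */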
static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	int port, ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_mtu_match(ds, port, info)) {
			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

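/* Offload a port joining a bridge: the targeted switch programs the port
 * directly, while the other switches get a chance to update their cross-chip
 * configuration.
 */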
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_join)
		return ds->ops->port_bridge_join(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join)
		return ds->ops->crosschip_bridge_join(ds, info->tree_index,
						      info->sw_index,
						      info->port, info->br);

	return 0;
}

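/* Offload a port leaving a bridge, and disable VLAN filtering again on the
 * port (or on the whole switch when the setting is global) once it is no
 * longer required.
 */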
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	bool unset_vlan_filtering = br_vlan_enabled(info->br);
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	int err, port;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
		for (port = 0; port < ds->num_ports; port++) {
			struct net_device *bridge_dev;

			bridge_dev = dsa_to_port(ds, port)->bridge_dev;

			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
				unset_vlan_filtering = false;
				break;
			}
		}
	}
	if (unset_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      false, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}
	return 0;
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
					  int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(ds, targeted_ds))
		return port == dsa_towards_port(ds, cpu_dp->ds->index,
						cpu_dp->index);

	return false;
}

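/* Look up an address/VID pair in a port's list of reference-counted host
 * addresses (FDB or MDB entries).
 */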
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

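/* Program an MDB entry towards a port. Entries on shared (CPU and DSA) ports
 * can be requested on behalf of multiple user ports, so they are reference
 * counted and only written to hardware on the first addition.
 */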
static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

	return 0;
}

static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

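/* Program an FDB entry towards a port, with the same reference counting scheme
 * as for MDB entries on shared (CPU and DSA) ports.
 */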
static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

	return 0;
}

static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

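/* Install a host FDB entry on all upstream-facing ports that sit between the
 * targeted port and its dedicated CPU port.
 */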
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_fdb_add(ds, port, info->addr,
						    info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_fdb_del(ds, port, info->addr,
						    info->vid);
			if (err)
				break;
		}
	}

	return err;
}

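/* Program an FDB entry on the local port that leads towards the port for which
 * the notifier was emitted.
 */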
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
}

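/* Offload HSR ring membership on the targeted switch; there is no cross-chip
 * handling for HSR.
 */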
static int dsa_switch_hsr_join(struct dsa_switch *ds,
			       struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
		return ds->ops->port_hsr_join(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_hsr_leave(struct dsa_switch *ds,
				struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

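/* LAG offload: the targeted switch programs its own port, while the other
 * switches may update their cross-chip link aggregation state.
 */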
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

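/* Program an MDB entry on the local port that leads towards the port for which
 * the notifier was emitted.
 */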
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_switch_do_mdb_add(ds, port, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_switch_do_mdb_del(ds, port, info->mdb);
}

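/* Install a host MDB entry on all upstream-facing ports that sit between the
 * targeted port and its dedicated CPU port.
 */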
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_mdb_add(ds, port, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_mdb_del(ds, port, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

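/* A VLAN is installed on the targeted port and, so that tagged traffic can
 * cross the fabric, on all DSA ports as well.
 */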
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
				  struct dsa_notifier_vlan_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	if (dsa_is_dsa_port(ds, port))
		return true;

	return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	int port, err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_vlan_match(ds, port, info)) {
			err = ds->ops->port_vlan_add(ds, port, info->vlan,
						     info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduit
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

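/* Switch the tagging protocol used on the CPU ports, then refresh the per-port
 * state that depends on the tagger (such as the MTU of the user ports).
 */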
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int port, err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port)) {
			struct net_device *slave;

			slave = dsa_to_port(ds, port)->slave;
			dsa_slave_setup_tagger(slave);

			/* rtnl_mutex is held in dsa_tree_change_tag_proto */
			dsa_slave_change_mtu(slave, slave->mtu);
		}
	}

	return 0;
}

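/* Offload MRP (Media Redundancy Protocol) instances and ring roles on the
 * targeted switch.
 */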
static int dsa_switch_mrp_add(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add(ds, info->port, info->mrp);

	return 0;
}

static int dsa_switch_mrp_del(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del(ds, info->port, info->mrp);

	return 0;
}

static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

static int
dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

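/* Dispatch a cross-chip notifier event to the handler for its type. A non-zero
 * return value stops the notifier chain for the remaining switches.
 */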
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HSR_JOIN:
		err = dsa_switch_hsr_join(ds, info);
		break;
	case DSA_NOTIFIER_HSR_LEAVE:
		err = dsa_switch_hsr_leave(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD:
		err = dsa_switch_mrp_add(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL:
		err = dsa_switch_mrp_del(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
		err = dsa_switch_mrp_add_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
		err = dsa_switch_mrp_del_ring_role(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

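/* Register this switch on its tree's notifier chain so that it receives the
 * cross-chip events handled above.
 */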
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}