// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"

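/* The hardware has a single ageing timer per switch, but several bridges,
 * each with its own ageing time, may span its ports. Resolve the conflict by
 * programming the fastest (lowest) requested value: scan all ports and pick
 * the smallest non-zero per-port setting.
 */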
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

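/* An MTU change on a user port must also be applied to the DSA links and the
 * CPU port of the same tree, since those shared ports carry frames (plus any
 * tagging overhead) on behalf of every user port.
 */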
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

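/* Bridge join and leave events are delivered to every switch in the tree:
 * the switch that owns the port programs it directly, while the other
 * switches get a chance to update their cross-chip forwarding state.
 */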
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
						info->dp->ds->index,
						info->dp->index,
						info->bridge);

	return 0;
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

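/* Look up a reference-counted address entry. The lookup key is the
 * {address, VID, database} tuple, so the same MAC address may be tracked
 * separately per VLAN and per bridging database.
 */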
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}

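/* Shared (CPU and DSA) ports need reference counting because multiple user
 * ports may request that the same address be trapped towards the host. For
 * example, two user ports in the same bridge can each trigger installation
 * of the same host MDB entry; only the first addition and the last deletion
 * should reach the driver.
 */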
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

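/* LAG FDB entries are reference-counted per LAG rather than per port, since
 * the hardware is taught about the LAG as a whole rather than about any
 * individual member port.
 */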
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

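/* Host FDB entries are installed on every upstream-facing port between the
 * targeted port and its CPU port. When the CPU port is part of a LAG, the
 * entry is tracked against the LAG itself rather than any individual member
 * port.
 */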
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}

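/* Resolve the local port on which the address should be installed: the
 * targeted port itself if it belongs to this switch, otherwise the DSA link
 * leading towards the switch that owns it, so that cross-chip traffic is
 * forwarded correctly.
 */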
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}

static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}

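/* A LAG change event (e.g. a member port going up or down) gives drivers a
 * chance to rebalance traffic across the remaining active ports of the LAG.
 */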
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->dp->index);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
						     info->dp->index);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
					      info->info, info->extack);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
						   info->dp->index, info->lag,
						   info->info, info->extack);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
						    info->dp->index, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	return dsa_port_is_dsa(dp) || dp == info->dp;
}

/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}

static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* There is no need to propagate, on shared ports, existing VLANs
	 * that are re-notified when only their flags have changed. Doing so
	 * would bump the refcount without a matching deletion, unbalancing
	 * the additions against the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

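/* Change the tagging protocol of a switch at runtime. The driver callback is
 * invoked first, while failure is still recoverable; only then are the CPU
 * ports, the per-slave tagger state and the MTUs (which may depend on the
 * new tagger's overhead) updated.
 */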
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

/* We use the same cross-chip notifiers to inform both the tagger side and
 * the switch side of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}

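/* Central dispatcher for cross-chip notifier events. Each switch in the tree
 * receives every DSA_NOTIFIER_* event; a non-zero return value breaks the
 * notifier chain and is propagated back to the emitter.
 */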
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only other way of traversing the tree is through
 * its ports list, which does not list each switch uniquely.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

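/* A minimal sketch of how an event is emitted, mirroring what helpers such
 * as dsa_port_notify() in port.c do; purely illustrative, with dp and
 * new_mtu assumed to be in scope:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.dp = dp,
 *		.mtu = new_mtu,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 */
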
/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and some DSA trees might not have probed yet.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

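/* Hook this switch's dsa_switch_event() handler into the tree's notifier
 * chain. Raw notifier chains provide no locking of their own; DSA relies on
 * higher-level synchronization (typically the rtnl_mutex) when emitting
 * events.
 */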
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}