xref: /openbmc/linux/net/dsa/switch.c (revision c9933d494c54f72290831191c09bb8488bfd5905)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch chip, part of a switch fabric
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
14 
15 #include "dsa_priv.h"
16 
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 						   unsigned int ageing_time)
19 {
20 	struct dsa_port *dp;
21 
22 	dsa_switch_for_each_port(dp, ds)
23 		if (dp->ageing_time && dp->ageing_time < ageing_time)
24 			ageing_time = dp->ageing_time;
25 
26 	return ageing_time;
27 }
28 
29 static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 				  struct dsa_notifier_ageing_time_info *info)
31 {
32 	unsigned int ageing_time = info->ageing_time;
33 
34 	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
35 		return -ERANGE;
36 
37 	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
38 		return -ERANGE;
39 
40 	/* Program the fastest ageing time in case of multiple bridges */
41 	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
42 
43 	if (ds->ops->set_ageing_time)
44 		return ds->ops->set_ageing_time(ds, ageing_time);
45 
46 	return 0;
47 }
48 
49 static bool dsa_port_mtu_match(struct dsa_port *dp,
50 			       struct dsa_notifier_mtu_info *info)
51 {
52 	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
53 }
54 
55 static int dsa_switch_mtu(struct dsa_switch *ds,
56 			  struct dsa_notifier_mtu_info *info)
57 {
58 	struct dsa_port *dp;
59 	int ret;
60 
61 	if (!ds->ops->port_change_mtu)
62 		return -EOPNOTSUPP;
63 
64 	dsa_switch_for_each_port(dp, ds) {
65 		if (dsa_port_mtu_match(dp, info)) {
66 			ret = ds->ops->port_change_mtu(ds, dp->index,
67 						       info->mtu);
68 			if (ret)
69 				return ret;
70 		}
71 	}
72 
73 	return 0;
74 }
75 
76 static int dsa_switch_bridge_join(struct dsa_switch *ds,
77 				  struct dsa_notifier_bridge_info *info)
78 {
79 	int err;
80 
81 	if (info->dp->ds == ds) {
82 		if (!ds->ops->port_bridge_join)
83 			return -EOPNOTSUPP;
84 
85 		err = ds->ops->port_bridge_join(ds, info->dp->index,
86 						info->bridge,
87 						&info->tx_fwd_offload,
88 						info->extack);
89 		if (err)
90 			return err;
91 	}
92 
93 	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
94 		err = ds->ops->crosschip_bridge_join(ds,
95 						     info->dp->ds->dst->index,
96 						     info->dp->ds->index,
97 						     info->dp->index,
98 						     info->bridge,
99 						     info->extack);
100 		if (err)
101 			return err;
102 	}
103 
104 	return 0;
105 }
106 
107 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
108 				   struct dsa_notifier_bridge_info *info)
109 {
110 	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
111 		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
112 
113 	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
114 		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
115 						info->dp->ds->index,
116 						info->dp->index,
117 						info->bridge);
118 
119 	return 0;
120 }
121 
122 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
123  * DSA links) that sit between the targeted port on which the notifier was
124  * emitted and its dedicated CPU port.
125  */
126 static bool dsa_port_host_address_match(struct dsa_port *dp,
127 					const struct dsa_port *targeted_dp)
128 {
129 	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
130 
131 	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
132 		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
133 						     cpu_dp->index);
134 
135 	return false;
136 }
137 
138 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
139 					      const unsigned char *addr, u16 vid,
140 					      struct dsa_db db)
141 {
142 	struct dsa_mac_addr *a;
143 
144 	list_for_each_entry(a, addr_list, list)
145 		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
146 		    dsa_db_equal(&a->db, &db))
147 			return a;
148 
149 	return NULL;
150 }
151 
/* Install @mdb on @dp. On shared (CPU/DSA) ports the entry is
 * reference-counted in dp->mdbs, since multiple user ports may request the
 * same host address; hardware is only programmed on the first request.
 * Returns 0 or a negative errno.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		/* Already offloaded; just take another reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the list entry, so a
	 * failure leaves no stale bookkeeping behind.
	 */
	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
196 
/* Remove one reference to @mdb from @dp. On shared (CPU/DSA) ports the
 * hardware entry is only deleted when the last reference is dropped.
 * Returns 0 or a negative errno (-ENOENT if the entry was never installed).
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		/* Hardware deletion failed; restore the dropped reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
235 
/* Install the (addr, vid, db) FDB entry on @dp. On shared (CPU/DSA) ports
 * entries are reference-counted in dp->fdbs and the hardware is only
 * programmed on the first request. Returns 0 or a negative errno.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		/* Already offloaded; just take another reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the list entry, so a
	 * failure leaves no stale bookkeeping behind.
	 */
	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
279 
/* Remove one reference to the (addr, vid, db) FDB entry on @dp. On shared
 * (CPU/DSA) ports the hardware entry is only deleted when the last
 * reference is dropped. Returns 0 or a negative errno (-ENOENT if the
 * entry was never installed).
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		/* Hardware deletion failed; restore the dropped reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
317 
318 static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
319 				     const unsigned char *addr, u16 vid,
320 				     struct dsa_db db)
321 {
322 	struct dsa_mac_addr *a;
323 	int err = 0;
324 
325 	mutex_lock(&lag->fdb_lock);
326 
327 	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
328 	if (a) {
329 		refcount_inc(&a->refcount);
330 		goto out;
331 	}
332 
333 	a = kzalloc(sizeof(*a), GFP_KERNEL);
334 	if (!a) {
335 		err = -ENOMEM;
336 		goto out;
337 	}
338 
339 	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
340 	if (err) {
341 		kfree(a);
342 		goto out;
343 	}
344 
345 	ether_addr_copy(a->addr, addr);
346 	a->vid = vid;
347 	refcount_set(&a->refcount, 1);
348 	list_add_tail(&a->list, &lag->fdbs);
349 
350 out:
351 	mutex_unlock(&lag->fdb_lock);
352 
353 	return err;
354 }
355 
/* Drop one reference to the (addr, vid, db) FDB entry on @lag. The
 * hardware entry is removed only when the last reference goes away.
 * Returns 0 or a negative errno (-ENOENT if not found).
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other references remain; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		/* Hardware deletion failed; restore the dropped reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
388 
389 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
390 				   struct dsa_notifier_fdb_info *info)
391 {
392 	struct dsa_port *dp;
393 	int err = 0;
394 
395 	if (!ds->ops->port_fdb_add)
396 		return -EOPNOTSUPP;
397 
398 	dsa_switch_for_each_port(dp, ds) {
399 		if (dsa_port_host_address_match(dp, info->dp)) {
400 			err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
401 						  info->db);
402 			if (err)
403 				break;
404 		}
405 	}
406 
407 	return err;
408 }
409 
410 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
411 				   struct dsa_notifier_fdb_info *info)
412 {
413 	struct dsa_port *dp;
414 	int err = 0;
415 
416 	if (!ds->ops->port_fdb_del)
417 		return -EOPNOTSUPP;
418 
419 	dsa_switch_for_each_port(dp, ds) {
420 		if (dsa_port_host_address_match(dp, info->dp)) {
421 			err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
422 						  info->db);
423 			if (err)
424 				break;
425 		}
426 	}
427 
428 	return err;
429 }
430 
431 static int dsa_switch_fdb_add(struct dsa_switch *ds,
432 			      struct dsa_notifier_fdb_info *info)
433 {
434 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
435 	struct dsa_port *dp = dsa_to_port(ds, port);
436 
437 	if (!ds->ops->port_fdb_add)
438 		return -EOPNOTSUPP;
439 
440 	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
441 }
442 
443 static int dsa_switch_fdb_del(struct dsa_switch *ds,
444 			      struct dsa_notifier_fdb_info *info)
445 {
446 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
447 	struct dsa_port *dp = dsa_to_port(ds, port);
448 
449 	if (!ds->ops->port_fdb_del)
450 		return -EOPNOTSUPP;
451 
452 	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
453 }
454 
455 static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
456 				  struct dsa_notifier_lag_fdb_info *info)
457 {
458 	struct dsa_port *dp;
459 
460 	if (!ds->ops->lag_fdb_add)
461 		return -EOPNOTSUPP;
462 
463 	/* Notify switch only if it has a port in this LAG */
464 	dsa_switch_for_each_port(dp, ds)
465 		if (dsa_port_offloads_lag(dp, info->lag))
466 			return dsa_switch_do_lag_fdb_add(ds, info->lag,
467 							 info->addr, info->vid,
468 							 info->db);
469 
470 	return 0;
471 }
472 
473 static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
474 				  struct dsa_notifier_lag_fdb_info *info)
475 {
476 	struct dsa_port *dp;
477 
478 	if (!ds->ops->lag_fdb_del)
479 		return -EOPNOTSUPP;
480 
481 	/* Notify switch only if it has a port in this LAG */
482 	dsa_switch_for_each_port(dp, ds)
483 		if (dsa_port_offloads_lag(dp, info->lag))
484 			return dsa_switch_do_lag_fdb_del(ds, info->lag,
485 							 info->addr, info->vid,
486 							 info->db);
487 
488 	return 0;
489 }
490 
491 static int dsa_switch_lag_change(struct dsa_switch *ds,
492 				 struct dsa_notifier_lag_info *info)
493 {
494 	if (info->dp->ds == ds && ds->ops->port_lag_change)
495 		return ds->ops->port_lag_change(ds, info->dp->index);
496 
497 	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
498 		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
499 						     info->dp->index);
500 
501 	return 0;
502 }
503 
504 static int dsa_switch_lag_join(struct dsa_switch *ds,
505 			       struct dsa_notifier_lag_info *info)
506 {
507 	if (info->dp->ds == ds && ds->ops->port_lag_join)
508 		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
509 					      info->info);
510 
511 	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
512 		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
513 						   info->dp->index, info->lag,
514 						   info->info);
515 
516 	return -EOPNOTSUPP;
517 }
518 
519 static int dsa_switch_lag_leave(struct dsa_switch *ds,
520 				struct dsa_notifier_lag_info *info)
521 {
522 	if (info->dp->ds == ds && ds->ops->port_lag_leave)
523 		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
524 
525 	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
526 		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
527 						    info->dp->index, info->lag);
528 
529 	return -EOPNOTSUPP;
530 }
531 
532 static int dsa_switch_mdb_add(struct dsa_switch *ds,
533 			      struct dsa_notifier_mdb_info *info)
534 {
535 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
536 	struct dsa_port *dp = dsa_to_port(ds, port);
537 
538 	if (!ds->ops->port_mdb_add)
539 		return -EOPNOTSUPP;
540 
541 	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
542 }
543 
544 static int dsa_switch_mdb_del(struct dsa_switch *ds,
545 			      struct dsa_notifier_mdb_info *info)
546 {
547 	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
548 	struct dsa_port *dp = dsa_to_port(ds, port);
549 
550 	if (!ds->ops->port_mdb_del)
551 		return -EOPNOTSUPP;
552 
553 	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
554 }
555 
556 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
557 				   struct dsa_notifier_mdb_info *info)
558 {
559 	struct dsa_port *dp;
560 	int err = 0;
561 
562 	if (!ds->ops->port_mdb_add)
563 		return -EOPNOTSUPP;
564 
565 	dsa_switch_for_each_port(dp, ds) {
566 		if (dsa_port_host_address_match(dp, info->dp)) {
567 			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
568 			if (err)
569 				break;
570 		}
571 	}
572 
573 	return err;
574 }
575 
576 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
577 				   struct dsa_notifier_mdb_info *info)
578 {
579 	struct dsa_port *dp;
580 	int err = 0;
581 
582 	if (!ds->ops->port_mdb_del)
583 		return -EOPNOTSUPP;
584 
585 	dsa_switch_for_each_port(dp, ds) {
586 		if (dsa_port_host_address_match(dp, info->dp)) {
587 			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
588 			if (err)
589 				break;
590 		}
591 	}
592 
593 	return err;
594 }
595 
596 /* Port VLANs match on the targeted port and on all DSA ports */
597 static bool dsa_port_vlan_match(struct dsa_port *dp,
598 				struct dsa_notifier_vlan_info *info)
599 {
600 	return dsa_port_is_dsa(dp) || dp == info->dp;
601 }
602 
603 /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
604  * (upstream and downstream) of that switch and its upstream switches.
605  */
606 static bool dsa_port_host_vlan_match(struct dsa_port *dp,
607 				     const struct dsa_port *targeted_dp)
608 {
609 	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
610 
611 	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
612 		return dsa_port_is_dsa(dp) || dp == cpu_dp;
613 
614 	return false;
615 }
616 
617 static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
618 				      const struct switchdev_obj_port_vlan *vlan)
619 {
620 	struct dsa_vlan *v;
621 
622 	list_for_each_entry(v, vlan_list, list)
623 		if (v->vid == vlan->vid)
624 			return v;
625 
626 	return NULL;
627 }
628 
/* Add @vlan to @dp. On shared (CPU/DSA) ports the VLAN is
 * reference-counted in dp->vlans, since multiple user ports may bridge
 * into the same VLAN; hardware is only programmed on the first addition.
 * Returns 0 or a negative errno.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		/* Already offloaded; just take another reference */
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	/* Program the hardware before publishing the list entry, so a
	 * failure leaves no stale bookkeeping behind.
	 */
	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
679 
/* Remove one reference to @vlan from @dp. On shared (CPU/DSA) ports the
 * hardware entry is only deleted when the last reference is dropped.
 * Returns 0 or a negative errno (-ENOENT if the VLAN was never added).
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		/* Hardware deletion failed; restore the dropped reference */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
717 
718 static int dsa_switch_vlan_add(struct dsa_switch *ds,
719 			       struct dsa_notifier_vlan_info *info)
720 {
721 	struct dsa_port *dp;
722 	int err;
723 
724 	if (!ds->ops->port_vlan_add)
725 		return -EOPNOTSUPP;
726 
727 	dsa_switch_for_each_port(dp, ds) {
728 		if (dsa_port_vlan_match(dp, info)) {
729 			err = dsa_port_do_vlan_add(dp, info->vlan,
730 						   info->extack);
731 			if (err)
732 				return err;
733 		}
734 	}
735 
736 	return 0;
737 }
738 
739 static int dsa_switch_vlan_del(struct dsa_switch *ds,
740 			       struct dsa_notifier_vlan_info *info)
741 {
742 	struct dsa_port *dp;
743 	int err;
744 
745 	if (!ds->ops->port_vlan_del)
746 		return -EOPNOTSUPP;
747 
748 	dsa_switch_for_each_port(dp, ds) {
749 		if (dsa_port_vlan_match(dp, info)) {
750 			err = dsa_port_do_vlan_del(dp, info->vlan);
751 			if (err)
752 				return err;
753 		}
754 	}
755 
756 	return 0;
757 }
758 
759 static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
760 				    struct dsa_notifier_vlan_info *info)
761 {
762 	struct dsa_port *dp;
763 	int err;
764 
765 	if (!ds->ops->port_vlan_add)
766 		return -EOPNOTSUPP;
767 
768 	dsa_switch_for_each_port(dp, ds) {
769 		if (dsa_port_host_vlan_match(dp, info->dp)) {
770 			err = dsa_port_do_vlan_add(dp, info->vlan,
771 						   info->extack);
772 			if (err)
773 				return err;
774 		}
775 	}
776 
777 	return 0;
778 }
779 
780 static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
781 				    struct dsa_notifier_vlan_info *info)
782 {
783 	struct dsa_port *dp;
784 	int err;
785 
786 	if (!ds->ops->port_vlan_del)
787 		return -EOPNOTSUPP;
788 
789 	dsa_switch_for_each_port(dp, ds) {
790 		if (dsa_port_host_vlan_match(dp, info->dp)) {
791 			err = dsa_port_do_vlan_del(dp, info->vlan);
792 			if (err)
793 				return err;
794 		}
795 	}
796 
797 	return 0;
798 }
799 
/* Migrate all CPU ports of @ds to the tagging protocol in @info, then
 * refresh the tagger-dependent state (e.g. MTU) of every user port.
 * Runs under rtnl_lock; once the first CPU port has been switched over,
 * the remaining steps must not fail.
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		/* Program the hardware first, and only record the new tagger
		 * on the CPU port once that succeeded.
		 */
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
836 
837 /* We use the same cross-chip notifiers to inform both the tagger side, as well
838  * as the switch side, of connection and disconnection events.
839  * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
840  * switch side doesn't support connecting to this tagger, and therefore, the
841  * fact that we don't disconnect the tagger side doesn't constitute a memory
842  * leak: the tagger will still operate with persistent per-switch memory, just
843  * with the switch side unconnected to it. What does constitute a hard error is
844  * when the switch side supports connecting but fails.
845  */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Not a hard error (see the comment above this function): the tagger
	 * side stays connected, only the switch side is left unconnected.
	 */
	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
874 
875 static int
876 dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
877 				struct dsa_notifier_tag_proto_info *info)
878 {
879 	const struct dsa_device_ops *tag_ops = info->tag_ops;
880 
881 	/* Notify the tagger about the disconnection from this switch */
882 	if (tag_ops->disconnect && ds->tagger_data)
883 		tag_ops->disconnect(ds);
884 
885 	/* No need to notify the switch, since it shouldn't have any
886 	 * resources to tear down
887 	 */
888 	return 0;
889 }
890 
891 static int
892 dsa_switch_master_state_change(struct dsa_switch *ds,
893 			       struct dsa_notifier_master_state_info *info)
894 {
895 	if (!ds->ops->master_state_change)
896 		return 0;
897 
898 	ds->ops->master_state_change(ds, info->master, info->operational);
899 
900 	return 0;
901 }
902 
/* Dispatch one cross-chip notifier event to the handler for its type.
 * Registered on each switch tree's notifier chain; a non-zero handler
 * return value stops propagation to the remaining switches of the tree.
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	/* @info's concrete type is determined by @event; each handler casts
	 * it to the matching dsa_notifier_*_info structure.
	 */
	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	/* Translate the errno into NOTIFY_* so the chain stops on error */
	return notifier_from_errno(err);
}
1002 
/* Subscribe @ds to the cross-chip notifier chain of its switch tree, so
 * it receives the DSA_NOTIFIER_* events dispatched by dsa_switch_event().
 */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
1009 
/* Counterpart of dsa_switch_register_notifier(). Failure to unregister is
 * only logged, since callers have no way to recover from it.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
1018