xref: /openbmc/linux/net/dsa/switch.c (revision b8cd5831)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch chip, part of a switch fabric
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/netdevice.h>
11 #include <linux/notifier.h>
12 #include <linux/if_vlan.h>
13 #include <net/switchdev.h>
14 
15 #include "dsa_priv.h"
16 
17 static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 						   unsigned int ageing_time)
19 {
20 	struct dsa_port *dp;
21 
22 	dsa_switch_for_each_port(dp, ds)
23 		if (dp->ageing_time && dp->ageing_time < ageing_time)
24 			ageing_time = dp->ageing_time;
25 
26 	return ageing_time;
27 }
28 
29 static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 				  struct dsa_notifier_ageing_time_info *info)
31 {
32 	unsigned int ageing_time = info->ageing_time;
33 
34 	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
35 		return -ERANGE;
36 
37 	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
38 		return -ERANGE;
39 
40 	/* Program the fastest ageing time in case of multiple bridges */
41 	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
42 
43 	if (ds->ops->set_ageing_time)
44 		return ds->ops->set_ageing_time(ds, ageing_time);
45 
46 	return 0;
47 }
48 
49 static bool dsa_port_mtu_match(struct dsa_port *dp,
50 			       struct dsa_notifier_mtu_info *info)
51 {
52 	if (dp->ds->index == info->sw_index && dp->index == info->port)
53 		return true;
54 
55 	/* Do not propagate to other switches in the tree if the notifier was
56 	 * targeted for a single switch.
57 	 */
58 	if (info->targeted_match)
59 		return false;
60 
61 	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
62 		return true;
63 
64 	return false;
65 }
66 
67 static int dsa_switch_mtu(struct dsa_switch *ds,
68 			  struct dsa_notifier_mtu_info *info)
69 {
70 	struct dsa_port *dp;
71 	int ret;
72 
73 	if (!ds->ops->port_change_mtu)
74 		return -EOPNOTSUPP;
75 
76 	dsa_switch_for_each_port(dp, ds) {
77 		if (dsa_port_mtu_match(dp, info)) {
78 			ret = ds->ops->port_change_mtu(ds, dp->index,
79 						       info->mtu);
80 			if (ret)
81 				return ret;
82 		}
83 	}
84 
85 	return 0;
86 }
87 
88 static int dsa_switch_bridge_join(struct dsa_switch *ds,
89 				  struct dsa_notifier_bridge_info *info)
90 {
91 	struct dsa_switch_tree *dst = ds->dst;
92 	int err;
93 
94 	if (dst->index == info->tree_index && ds->index == info->sw_index) {
95 		if (!ds->ops->port_bridge_join)
96 			return -EOPNOTSUPP;
97 
98 		err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
99 						&info->tx_fwd_offload,
100 						info->extack);
101 		if (err)
102 			return err;
103 	}
104 
105 	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
106 	    ds->ops->crosschip_bridge_join) {
107 		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
108 						     info->sw_index,
109 						     info->port, info->bridge,
110 						     info->extack);
111 		if (err)
112 			return err;
113 	}
114 
115 	return 0;
116 }
117 
/* Re-synchronize the VLAN filtering setting of the port leaving the bridge
 * described by @info, for switches that need standalone ports to stay VLAN
 * filtering (or the opposite). Called from the bridge leave path.
 */
static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
					  struct dsa_notifier_bridge_info *info)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	/* Decide the direction of the change: ports that need VLAN filtering
	 * while standalone must turn it back on when leaving a VLAN-unaware
	 * bridge; ports that must not be filtering while standalone turn it
	 * off when leaving a VLAN-aware bridge. vlan_filtering is only read
	 * when change_vlan_filtering is true, so it is always initialized
	 * before use.
	 */
	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(dp);

			/* Another port still under a VLAN-aware bridge: the
			 * global setting must be left alone.
			 */
			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		/* -EOPNOTSUPP is tolerated: the driver simply has no
		 * vlan_filtering knob to toggle.
		 */
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
168 
169 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
170 				   struct dsa_notifier_bridge_info *info)
171 {
172 	struct dsa_switch_tree *dst = ds->dst;
173 	int err;
174 
175 	if (dst->index == info->tree_index && ds->index == info->sw_index &&
176 	    ds->ops->port_bridge_leave)
177 		ds->ops->port_bridge_leave(ds, info->port, info->bridge);
178 
179 	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
180 	    ds->ops->crosschip_bridge_leave)
181 		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
182 						info->sw_index, info->port,
183 						info->bridge);
184 
185 	if (ds->dst->index == info->tree_index && ds->index == info->sw_index) {
186 		err = dsa_switch_sync_vlan_filtering(ds, info);
187 		if (err)
188 			return err;
189 	}
190 
191 	return 0;
192 }
193 
194 /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
195  * DSA links) that sit between the targeted port on which the notifier was
196  * emitted and its dedicated CPU port.
197  */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	/* Resolve the port the notifier was emitted for, and the CPU port
	 * that serves it.
	 */
	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	/* On switches upstream of the targeted one, the match is the single
	 * port that routes towards the dedicated CPU port.
	 */
	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}
214 
215 static bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
216 {
217 	if (a->type != b->type)
218 		return false;
219 
220 	switch (a->type) {
221 	case DSA_DB_PORT:
222 		return a->dp == b->dp;
223 	case DSA_DB_LAG:
224 		return a->lag.dev == b->lag.dev;
225 	case DSA_DB_BRIDGE:
226 		return a->bridge.num == b->bridge.num;
227 	default:
228 		WARN_ON(1);
229 		return false;
230 	}
231 }
232 
233 static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
234 					      const unsigned char *addr, u16 vid,
235 					      struct dsa_db db)
236 {
237 	struct dsa_mac_addr *a;
238 
239 	list_for_each_entry(a, addr_list, list)
240 		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
241 		    dsa_db_equal(&a->db, &db))
242 			return a;
243 
244 	return NULL;
245 }
246 
/* Install a multicast database entry on @dp. Shared (CPU/DSA) ports keep a
 * refcounted list so the same entry added on behalf of several user ports is
 * only programmed into hardware once.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	/* Already programmed: just take another reference */
	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first so a driver failure leaves no stale entry
	 * on the list.
	 */
	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
291 
/* Remove a multicast database entry from @dp. On shared ports the entry is
 * only removed from hardware when the last reference is dropped.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry: keep it programmed */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		/* Hardware still holds the entry; restore the reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
330 
/* Install a unicast FDB entry on @dp. Shared (CPU/DSA) ports keep a
 * refcounted list so duplicate additions are collapsed into one hardware
 * entry.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	/* Already programmed: just take another reference */
	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first so a driver failure leaves no stale entry
	 * on the list.
	 */
	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
374 
/* Remove a unicast FDB entry from @dp. On shared ports the entry is only
 * removed from hardware when the last reference is dropped.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry: keep it programmed */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		/* Hardware still holds the entry; restore the reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
412 
413 static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
414 				     const unsigned char *addr, u16 vid,
415 				     struct dsa_db db)
416 {
417 	struct dsa_mac_addr *a;
418 	int err = 0;
419 
420 	mutex_lock(&lag->fdb_lock);
421 
422 	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
423 	if (a) {
424 		refcount_inc(&a->refcount);
425 		goto out;
426 	}
427 
428 	a = kzalloc(sizeof(*a), GFP_KERNEL);
429 	if (!a) {
430 		err = -ENOMEM;
431 		goto out;
432 	}
433 
434 	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
435 	if (err) {
436 		kfree(a);
437 		goto out;
438 	}
439 
440 	ether_addr_copy(a->addr, addr);
441 	a->vid = vid;
442 	refcount_set(&a->refcount, 1);
443 	list_add_tail(&a->list, &lag->fdbs);
444 
445 out:
446 	mutex_unlock(&lag->fdb_lock);
447 
448 	return err;
449 }
450 
/* Remove a refcounted FDB entry from a LAG. The hardware entry is only
 * deleted when the last reference is dropped.
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry: keep it programmed */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		/* Hardware still holds the entry; restore the reference */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
483 
484 static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
485 				   struct dsa_notifier_fdb_info *info)
486 {
487 	struct dsa_port *dp;
488 	int err = 0;
489 
490 	if (!ds->ops->port_fdb_add)
491 		return -EOPNOTSUPP;
492 
493 	dsa_switch_for_each_port(dp, ds) {
494 		if (dsa_port_host_address_match(dp, info->sw_index,
495 						info->port)) {
496 			err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
497 						  info->db);
498 			if (err)
499 				break;
500 		}
501 	}
502 
503 	return err;
504 }
505 
506 static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
507 				   struct dsa_notifier_fdb_info *info)
508 {
509 	struct dsa_port *dp;
510 	int err = 0;
511 
512 	if (!ds->ops->port_fdb_del)
513 		return -EOPNOTSUPP;
514 
515 	dsa_switch_for_each_port(dp, ds) {
516 		if (dsa_port_host_address_match(dp, info->sw_index,
517 						info->port)) {
518 			err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
519 						  info->db);
520 			if (err)
521 				break;
522 		}
523 	}
524 
525 	return err;
526 }
527 
528 static int dsa_switch_fdb_add(struct dsa_switch *ds,
529 			      struct dsa_notifier_fdb_info *info)
530 {
531 	int port = dsa_towards_port(ds, info->sw_index, info->port);
532 	struct dsa_port *dp = dsa_to_port(ds, port);
533 
534 	if (!ds->ops->port_fdb_add)
535 		return -EOPNOTSUPP;
536 
537 	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
538 }
539 
540 static int dsa_switch_fdb_del(struct dsa_switch *ds,
541 			      struct dsa_notifier_fdb_info *info)
542 {
543 	int port = dsa_towards_port(ds, info->sw_index, info->port);
544 	struct dsa_port *dp = dsa_to_port(ds, port);
545 
546 	if (!ds->ops->port_fdb_del)
547 		return -EOPNOTSUPP;
548 
549 	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
550 }
551 
/* Handle a LAG FDB add notifier: only act if @ds has a port offloading this
 * LAG; other switches ignore the event.
 */
static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}
569 
/* Handle a LAG FDB del notifier: only act if @ds has a port offloading this
 * LAG; other switches ignore the event.
 */
static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}
587 
588 static int dsa_switch_lag_change(struct dsa_switch *ds,
589 				 struct dsa_notifier_lag_info *info)
590 {
591 	if (ds->index == info->sw_index && ds->ops->port_lag_change)
592 		return ds->ops->port_lag_change(ds, info->port);
593 
594 	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
595 		return ds->ops->crosschip_lag_change(ds, info->sw_index,
596 						     info->port);
597 
598 	return 0;
599 }
600 
601 static int dsa_switch_lag_join(struct dsa_switch *ds,
602 			       struct dsa_notifier_lag_info *info)
603 {
604 	if (ds->index == info->sw_index && ds->ops->port_lag_join)
605 		return ds->ops->port_lag_join(ds, info->port, info->lag,
606 					      info->info);
607 
608 	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
609 		return ds->ops->crosschip_lag_join(ds, info->sw_index,
610 						   info->port, info->lag,
611 						   info->info);
612 
613 	return -EOPNOTSUPP;
614 }
615 
616 static int dsa_switch_lag_leave(struct dsa_switch *ds,
617 				struct dsa_notifier_lag_info *info)
618 {
619 	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
620 		return ds->ops->port_lag_leave(ds, info->port, info->lag);
621 
622 	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
623 		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
624 						    info->port, info->lag);
625 
626 	return -EOPNOTSUPP;
627 }
628 
629 static int dsa_switch_mdb_add(struct dsa_switch *ds,
630 			      struct dsa_notifier_mdb_info *info)
631 {
632 	int port = dsa_towards_port(ds, info->sw_index, info->port);
633 	struct dsa_port *dp = dsa_to_port(ds, port);
634 
635 	if (!ds->ops->port_mdb_add)
636 		return -EOPNOTSUPP;
637 
638 	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
639 }
640 
641 static int dsa_switch_mdb_del(struct dsa_switch *ds,
642 			      struct dsa_notifier_mdb_info *info)
643 {
644 	int port = dsa_towards_port(ds, info->sw_index, info->port);
645 	struct dsa_port *dp = dsa_to_port(ds, port);
646 
647 	if (!ds->ops->port_mdb_del)
648 		return -EOPNOTSUPP;
649 
650 	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
651 }
652 
653 static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
654 				   struct dsa_notifier_mdb_info *info)
655 {
656 	struct dsa_port *dp;
657 	int err = 0;
658 
659 	if (!ds->ops->port_mdb_add)
660 		return -EOPNOTSUPP;
661 
662 	dsa_switch_for_each_port(dp, ds) {
663 		if (dsa_port_host_address_match(dp, info->sw_index,
664 						info->port)) {
665 			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
666 			if (err)
667 				break;
668 		}
669 	}
670 
671 	return err;
672 }
673 
674 static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
675 				   struct dsa_notifier_mdb_info *info)
676 {
677 	struct dsa_port *dp;
678 	int err = 0;
679 
680 	if (!ds->ops->port_mdb_del)
681 		return -EOPNOTSUPP;
682 
683 	dsa_switch_for_each_port(dp, ds) {
684 		if (dsa_port_host_address_match(dp, info->sw_index,
685 						info->port)) {
686 			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
687 			if (err)
688 				break;
689 		}
690 	}
691 
692 	return err;
693 }
694 
695 /* Port VLANs match on the targeted port and on all DSA ports */
696 static bool dsa_port_vlan_match(struct dsa_port *dp,
697 				struct dsa_notifier_vlan_info *info)
698 {
699 	if (dp->ds->index == info->sw_index && dp->index == info->port)
700 		return true;
701 
702 	if (dsa_port_is_dsa(dp))
703 		return true;
704 
705 	return false;
706 }
707 
708 /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
709  * (upstream and downstream) of that switch and its upstream switches.
710  */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	/* Resolve the port the notifier was emitted for, and the CPU port
	 * that serves it.
	 */
	targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info->port);
	cpu_dp = targeted_dp->cpu_dp;

	/* Match DSA links and the dedicated CPU port on the targeted switch
	 * and its upstream switches.
	 */
	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}
726 
727 static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
728 				      const struct switchdev_obj_port_vlan *vlan)
729 {
730 	struct dsa_vlan *v;
731 
732 	list_for_each_entry(v, vlan_list, list)
733 		if (v->vid == vlan->vid)
734 			return v;
735 
736 	return NULL;
737 }
738 
/* Install a VLAN on @dp. Shared (CPU/DSA) ports keep a refcounted list so
 * the same VID added on behalf of several user ports is only programmed into
 * hardware once.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	/* Already programmed: just take another reference */
	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first so a driver failure leaves no stale entry
	 * on the list.
	 */
	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
789 
/* Remove a VLAN from @dp. On shared ports the VLAN is only removed from
 * hardware when the last reference is dropped.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this VLAN: keep it programmed */
	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		/* Hardware still holds the VLAN; restore the reference */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
827 
828 static int dsa_switch_vlan_add(struct dsa_switch *ds,
829 			       struct dsa_notifier_vlan_info *info)
830 {
831 	struct dsa_port *dp;
832 	int err;
833 
834 	if (!ds->ops->port_vlan_add)
835 		return -EOPNOTSUPP;
836 
837 	dsa_switch_for_each_port(dp, ds) {
838 		if (dsa_port_vlan_match(dp, info)) {
839 			err = dsa_port_do_vlan_add(dp, info->vlan,
840 						   info->extack);
841 			if (err)
842 				return err;
843 		}
844 	}
845 
846 	return 0;
847 }
848 
849 static int dsa_switch_vlan_del(struct dsa_switch *ds,
850 			       struct dsa_notifier_vlan_info *info)
851 {
852 	struct dsa_port *dp;
853 	int err;
854 
855 	if (!ds->ops->port_vlan_del)
856 		return -EOPNOTSUPP;
857 
858 	dsa_switch_for_each_port(dp, ds) {
859 		if (dsa_port_vlan_match(dp, info)) {
860 			err = dsa_port_do_vlan_del(dp, info->vlan);
861 			if (err)
862 				return err;
863 		}
864 	}
865 
866 	return 0;
867 }
868 
869 static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
870 				    struct dsa_notifier_vlan_info *info)
871 {
872 	struct dsa_port *dp;
873 	int err;
874 
875 	if (!ds->ops->port_vlan_add)
876 		return -EOPNOTSUPP;
877 
878 	dsa_switch_for_each_port(dp, ds) {
879 		if (dsa_port_host_vlan_match(dp, info)) {
880 			err = dsa_port_do_vlan_add(dp, info->vlan,
881 						   info->extack);
882 			if (err)
883 				return err;
884 		}
885 	}
886 
887 	return 0;
888 }
889 
890 static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
891 				    struct dsa_notifier_vlan_info *info)
892 {
893 	struct dsa_port *dp;
894 	int err;
895 
896 	if (!ds->ops->port_vlan_del)
897 		return -EOPNOTSUPP;
898 
899 	dsa_switch_for_each_port(dp, ds) {
900 		if (dsa_port_host_vlan_match(dp, info)) {
901 			err = dsa_port_do_vlan_del(dp, info->vlan);
902 			if (err)
903 				return err;
904 		}
905 	}
906 
907 	return 0;
908 }
909 
/* Switch @ds over to a new tagging protocol: program each CPU port, then
 * refresh the tagger-dependent state (ops pointers, MTU) of all user ports.
 * Runs under RTNL.
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	/* Program the hardware first; on failure the remaining state is
	 * left untouched.
	 */
	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
946 
947 /* We use the same cross-chip notifiers to inform both the tagger side, as well
948  * as the switch side, of connection and disconnection events.
949  * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
950  * switch side doesn't support connecting to this tagger, and therefore, the
951  * fact that we don't disconnect the tagger side doesn't constitute a memory
952  * leak: the tagger will still operate with persistent per-switch memory, just
953  * with the switch side unconnected to it. What does constitute a hard error is
954  * when the switch side supports connecting but fails.
955  */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Per the comment above this function, a switch that cannot connect
	 * is not an error worth unwinding the tagger connection for.
	 */
	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
984 
/* Disconnect @ds from its tagger. Only the tagger side needs notification;
 * ds->tagger_data acts as the "currently connected" marker.
 */
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}
1000 
1001 static int
1002 dsa_switch_master_state_change(struct dsa_switch *ds,
1003 			       struct dsa_notifier_master_state_info *info)
1004 {
1005 	if (!ds->ops->master_state_change)
1006 		return 0;
1007 
1008 	ds->ops->master_state_change(ds, info->master, info->operational);
1009 
1010 	return 0;
1011 }
1012 
/* Notifier callback for the switch fabric's raw notifier chain: dispatch a
 * cross-chip event to the matching handler and translate its errno into a
 * notifier return value (a non-zero error stops the chain).
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	/* @info's concrete type is determined by @event; each handler casts
	 * it to its own dsa_notifier_*_info structure.
	 */
	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
1112 
/* Subscribe @ds to its tree's cross-chip notifier chain so that it receives
 * DSA_NOTIFIER_* events emitted by any switch in the fabric.
 */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
1119 
/* Unsubscribe @ds from its tree's cross-chip notifier chain. Failure is only
 * logged since the caller (teardown path) cannot recover from it.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
1128