// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DSA topology and switch handling
 *
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/sch_generic.h>

#include "devlink.h"
#include "dsa.h"
#include "dsa_priv.h"
#include "master.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

static struct workqueue_struct *dsa_owq;

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
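
/* A minimal usage sketch (hypothetical driver code, not from this file):
 * switchdev handlers defer work items to the ordered DSA workqueue, and
 * the deferred items are drained with dsa_flush_workqueue() before the
 * state they reference is torn down.
 *
 *	static void foo_fdb_work(struct work_struct *work)
 *	{
 *		// process the deferred FDB operation, then free the item
 *	}
 *
 *	INIT_WORK(&item->work, foo_fdb_work);
 *	if (!dsa_schedule_work(&item->work))
 *		kfree(item);		// was already queued
 *	...
 *	dsa_flush_workqueue();		// wait for all deferred items
 */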

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
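
/* A hedged sketch of the driver side (hypothetical driver, illustrative
 * value): a driver that supports hardware LAG offload advertises how many
 * LAG IDs it can map before dsa_lag_map() is first called, typically from
 * its .setup() callback:
 *
 *	static int foo_setup(struct dsa_switch *ds)
 *	{
 *		ds->num_lag_ids = 4;	// hardware supports 4 LAG groups
 *		...
 *		return 0;
 *	}
 */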

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
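
/* A hedged sketch of the expected get/put pairing (the caller code is
 * illustrative; in-tree it lives in the bridge join/leave paths), assuming
 * the usual ds->max_num_bridges capability field:
 *
 *	bridge.num = dsa_bridge_num_get(bridge_dev, ds->max_num_bridges);
 *	...
 *	dsa_bridge_num_put(bridge_dev, bridge.num);
 *
 * A return value of 0 means "no unique number", which callers must treat
 * as "no FDB isolation / TX forwarding offload for this bridge".
 */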

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
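
/* The routing table is built from "link" phandles in the device tree. A
 * hedged sketch of what a DSA link port node may look like (node names
 * and values are illustrative, not from this file):
 *
 *	switch0port5: port@5 {
 *		reg = <5>;
 *		link = <&switch1port6>;
 *	};
 *
 * Each phandle must resolve to a port already present in the tree;
 * otherwise the table is considered incomplete, and dsa_tree_setup()
 * returns early, waiting for more switches to probe.
 */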

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *master;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	master = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return master;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	if (!dp->setup)
		return;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct device_node *dn;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values to decide whether to
	 * probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	dsa_switch_devlink_unregister(ds);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	dsa_switch_devlink_free(ds);

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;
		bool admin_up = (master->flags & IFF_UP) &&
				!qdisc_tx_is_noop(master);

		err = dsa_master_setup(master, cpu_dp);
		if (err)
			break;

		/* Replay master state event */
		dsa_tree_master_admin_state_change(dst, master, admin_up);
		dsa_tree_master_oper_state_change(dst, master,
						  netif_oper_up(master));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the master is
		 * currently up and running.
		 */
		dsa_tree_master_admin_state_change(dst, master, false);

		dsa_master_teardown(master);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
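
/* User space reaches this path through the per-master "dsa/tagging" sysfs
 * attribute. A hedged sketch of the procedure (interface and tagger names
 * are illustrative); per the IFF_UP checks above, the master and all user
 * ports must be administratively down first:
 *
 *	# ip link set eth0 down
 *	# ip link set lan1 down
 *	# echo ocelot > /sys/class/net/eth0/dsa/tagging
 */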

static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (up && cpu_dp->master_oper_up))
		notify = true;

	cpu_dp->master_admin_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (cpu_dp->master_admin_up && up))
		notify = true;

	cpu_dp->master_oper_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another. When that
	 * happens, the switch driver may want to know whether its tagging
	 * protocol is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_tag_driver_get_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get_by_id(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
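
/* A hedged device tree sketch of the three port flavors parsed above
 * (node names, labels and the tagger name are illustrative):
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";			// user port
 *	};
 *
 *	port@5 {
 *		reg = <5>;
 *		link = <&otherswitchport>;	// DSA link
 *	};
 *
 *	port@6 {
 *		reg = <6>;
 *		ethernet = <&eth0>;		// CPU port
 *		dsa-tag-protocol = "edsa";	// optional user override
 *	};
 */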

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
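
/* "dsa,member" encodes <tree index, switch index>, matching m[0] and m[1]
 * above. A hedged sketch for the second switch of tree 0 (node name is
 * illustrative):
 *
 *	switch1: switch@1 {
 *		dsa,member = <0 1>;
 *		...
 *	};
 */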

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

static struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}
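
/* A hedged sketch of the platform data equivalent (hypothetical board
 * code), matching the cd->port_names[] and cd->netdev[] accesses above;
 * the special names "cpu" and "dsa" select the port type:
 *
 *	static struct dsa_chip_data foo_pdata = {
 *		.port_names[0]	= "lan1",
 *		.port_names[1]	= "lan2",
 *		.port_names[5]	= "cpu",
 *		.netdev[5]	= &foo_eth_device.dev,
 *	};
 */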

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
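
/* A minimal sketch of the registration pattern from a driver's probe
 * (hypothetical driver; the fields set are the ones dsa_switch_probe()
 * checks above):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &pdev->dev;	// required, used for OF/pdata lookup
 *		ds->num_ports = 7;	// required, must be non-zero
 *		ds->ops = &foo_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 */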

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dsa_port_to_master(dp);
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	return 0;

netlink_register_fail:
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	rtnl_link_unregister(&dsa_link_ops);

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");