// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DSA topology and switch handling
 *
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>

#include "devlink.h"
#include "dsa.h"
#include "master.h"
#include "netlink.h"
#include "port.h"
#include "slave.h"
#include "switch.h"
#include "tag.h"

#define DSA_MAX_NUM_OFFLOADING_BRIDGES		BITS_PER_LONG

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

static struct workqueue_struct *dsa_owq;

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
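
/* Usage sketch (not taken verbatim from any caller): switchdev events are
 * often delivered in atomic context, so handlers defer the real work to the
 * ordered DSA workqueue and later synchronize against it. The context
 * structure and callback below are hypothetical:
 *
 *	struct my_deferred_ctx {
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_deferred_ctx *ctx =
 *			container_of(work, struct my_deferred_ctx, work);
 *
 *		... process outside atomic context, then kfree(ctx) ...
 *	}
 *
 *	INIT_WORK(&ctx->work, my_work_fn);
 *	dsa_schedule_work(&ctx->work);
 *
 * dsa_flush_workqueue() then waits for all items queued so far to finish.
 */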

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
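
/* Driver-side sketch of how the LAG ID space is sized (the value 16 is
 * illustrative only): a driver that can offload LAGs advertises its limit,
 * e.g. from its .setup callback:
 *
 *	ds->num_lag_ids = 16;
 *
 * After dsa_lag_map() has run, dsa_lag_id()/dsa_lag_by_id() translate
 * between a LAG structure and its hardware ID. Drivers that leave
 * ds->num_lag_ids at zero simply never get an ID mapped.
 */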

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
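
/* Worked example of the allocation scheme above (the max of 8 is an
 * illustrative, driver-provided limit): the first bridge that requests TX
 * forwarding offload or FDB isolation receives bridge_num 1, the next one
 * 2, and so on up to 7, since bit 0 is never handed out so that a
 * bridge_num of 0 can always mean "no dedicated number". Once the limit is
 * reached, dsa_bridge_num_get() returns 0 and the caller falls back to
 * software forwarding for that bridge.
 */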

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
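
/* The routing table is derived from "link" phandles in the device tree.
 * A sketch of two cross-connected switches (node names and labels are
 * illustrative, per the dsa.yaml binding):
 *
 *	switch0port5: port@5 {
 *		reg = <5>;
 *		link = <&switch1port6>;
 *	};
 *
 *	switch1port6: port@6 {
 *		reg = <6>;
 *		link = <&switch0port5>;
 *	};
 *
 * Each phandle is resolved with dsa_tree_find_port_by_node(); as long as a
 * referenced port has not been probed yet, the routing table is incomplete
 * and the tree setup below is deferred.
 */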

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *master;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	master = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return master;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	if (!dp->setup)
		return;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
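
/* A minimal driver-side sketch of the .change_tag_protocol hook invoked
 * above (the helper functions and the set of supported protocols are
 * hypothetical):
 *
 *	static int my_change_tag_protocol(struct dsa_switch *ds,
 *					  enum dsa_tag_protocol proto)
 *	{
 *		switch (proto) {
 *		case DSA_TAG_PROTO_DSA:
 *			return my_setup_dsa_headers(ds);
 *		case DSA_TAG_PROTO_EDSA:
 *			return my_setup_edsa_headers(ds);
 *		default:
 *			return -EPROTONOSUPPORT;
 *		}
 *	}
 *
 * An error here propagates out of dsa_switch_setup_tag_protocol() and fails
 * the whole tree, which keeps the tree and the driver in agreement on the
 * tagging protocol in use.
 */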

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct device_node *dn;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values to decide whether or
	 * not to probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	dsa_switch_devlink_unregister(ds);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	dsa_switch_devlink_free(ds);

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;
		bool admin_up = (master->flags & IFF_UP) &&
				!qdisc_tx_is_noop(master);

		err = dsa_master_setup(master, cpu_dp);
		if (err)
			break;

		/* Replay master state event */
		dsa_tree_master_admin_state_change(dst, master, admin_up);
		dsa_tree_master_oper_state_change(dst, master,
						  netif_oper_up(master));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the master is
		 * currently up and running.
		 */
		dsa_tree_master_admin_state_change(dst, master, false);

		dsa_master_teardown(master);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
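
/* From user space this path is reached through the per-master "dsa/tagging"
 * sysfs attribute. A shell sketch (the interface and tagger names are
 * illustrative; the master and all user ports must be down first):
 *
 *	# cat /sys/class/net/eth0/dsa/tagging
 *	ocelot
 *	# ip link set eth0 down
 *	# echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
 */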

static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (up && cpu_dp->master_oper_up))
		notify = true;

	cpu_dp->master_admin_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (cpu_dp->master_admin_up && up))
		notify = true;

	cpu_dp->master_oper_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another; when that
	 * happens, the switch driver may want to know whether its tagging
	 * protocol is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_tag_driver_get_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get_by_id(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
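
/* dsa_port_parse_of() distinguishes the three port flavours purely from
 * device tree properties. Sketches of each (labels and phandles are
 * illustrative). A user port only needs a label:
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *	};
 *
 * a DSA link carries a "link" phandle to the peer port:
 *
 *	port@5 {
 *		reg = <5>;
 *		link = <&switch1port4>;
 *	};
 *
 * and a CPU port points at its master via "ethernet", optionally with a
 * user-selected tagging protocol:
 *
 *	port@6 {
 *		reg = <6>;
 *		ethernet = <&gmac0>;
 *		dsa-tag-protocol = "edsa";
 *	};
 */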

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
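
/* "dsa,member" encodes <tree-index switch-index>. A sketch for two switches
 * that belong to the same tree (node names are illustrative):
 *
 *	switch0@0 {
 *		dsa,member = <0 0>;
 *	};
 *
 *	switch1@1 {
 *		dsa,member = <0 1>;
 *	};
 *
 * Omitting the property is equivalent to <0 0>, i.e. the only switch of
 * tree 0.
 */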

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

static struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
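
/* A minimal registration sketch from a driver's probe function (the names
 * and the port count are hypothetical; error handling trimmed):
 *
 *	static int my_probe(struct mdio_device *mdiodev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &mdiodev->dev;
 *		ds->num_ports = 8;
 *		ds->ops = &my_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 *
 * ds->dev and ds->num_ports must be set before the call, as checked in
 * dsa_switch_probe() above; -EPROBE_DEFER is returned while the master
 * netdev or the tagging driver is not available yet.
 */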

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA
 * would block that operation from completing, due to the dev_hold() taken
 * inside netdev_upper_dev_link(). Unlink the DSA slave interfaces so that
 * they are no longer uppers of the DSA master, allowing the system to reboot
 * successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dsa_port_to_master(dp);
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
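
/* Sketch of the expected pairing in a driver's .shutdown method (the driver
 * structures are hypothetical, but the pattern mirrors in-tree users):
 *
 *	static void my_shutdown(struct mdio_device *mdiodev)
 *	{
 *		struct my_priv *priv = dev_get_drvdata(&mdiodev->dev);
 *
 *		if (!priv)
 *			return;
 *
 *		dsa_switch_shutdown(priv->ds);
 *
 *		dev_set_drvdata(&mdiodev->dev, NULL);
 *	}
 *
 * Clearing drvdata afterwards lets a later .remove call detect that the
 * switch was already shut down.
 */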

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
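
/* These helpers let drivers that keep one shared hardware database decide
 * whether an entry is still needed by another database (port, LAG or
 * bridge) before touching hardware. A hedged sketch of a .port_fdb_del
 * implementation (my_hw_fdb_del is hypothetical); the DSA core already
 * holds dp->addr_lists_lock when calling it:
 *
 *	static int my_port_fdb_del(struct dsa_switch *ds, int port,
 *				   const unsigned char *addr, u16 vid,
 *				   struct dsa_db db)
 *	{
 *		if (dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
 *			return 0;
 *
 *		return my_hw_fdb_del(ds, port, addr, vid);
 *	}
 */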

static const struct dsa_stubs __dsa_stubs = {
	.master_hwtstamp_validate = __dsa_master_hwtstamp_validate,
};

static void dsa_register_stubs(void)
{
	dsa_stubs = &__dsa_stubs;
}

static void dsa_unregister_stubs(void)
{
	dsa_stubs = NULL;
}

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	dsa_register_stubs();

	return 0;

netlink_register_fail:
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_unregister_stubs();

	rtnl_link_unregister(&dsa_link_ops);

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");