// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

/*
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * ...
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * ...
 * no-ops.
 */
dsa_lag_map():
        for (id = 1; id <= dst->lags_len; id++) {
                dst->lags[id - 1] = lag;
                lag->id = id;
        /* ...
         * driver can then return -EOPNOTSUPP back to DSA, which will ...
         */

/*
 * dsa_lag_unmap() - Remove a LAG ID mapping
 */
dsa_lag_unmap():
        dst->lags[id - 1] = NULL;
        lag->id = 0;
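/*
 * Illustration (not from this file): a driver advertises how many hardware
 * LAG IDs it has by setting ds->num_lag_ids, typically from its ->setup()
 * callback (relies only on <net/dsa.h>). The function name and the ID count
 * below are made up.
 */
static int example_switch_setup(struct dsa_switch *ds)        /* hypothetical driver */
{
        /* Tell the DSA core how many LAGs this hardware can offload.
         * Leaving this at zero is fine: dsa_lag_map()/dsa_lag_unmap() then
         * become no-ops, and a LAG join request can be refused with
         * -EOPNOTSUPP so the bridge falls back to a software LAG.
         */
        ds->num_lag_ids = 4;

        return 0;
}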
dsa_tree_lag_find():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_lag_dev_get(dp) == lag_dev)
                        return dp->lag;

dsa_tree_bridge_find():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_bridge_dev_get(dp) == br)
                        return dp->bridge;

dsa_bridge_num_find():
        struct dsa_bridge *bridge;

        bridge = dsa_tree_bridge_find(dst, bridge_dev);
        if (bridge)
                return bridge->num;

dsa_bridge_num_get():
        /* ... bridge numbering ...
         * ... offload for this bridge ...
         */
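/*
 * Illustration (not from this file): a driver that can offload forwarding
 * for only a limited number of bridges advertises that limit through
 * ds->max_num_bridges; dsa_bridge_num_get() then hands out 1-based bridge
 * numbers until the limit is reached. The limit below is made up.
 */
static int example_bridge_limit_setup(struct dsa_switch *ds)        /* hypothetical */
{
        /* Bridges beyond this count are still joined, just without a
         * hardware bridge number (i.e. without forwarding offload).
         */
        ds->max_num_bridges = 7;

        return 0;
}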
dsa_switch_find():
        struct dsa_port *dp;

        if (dst->index != tree_index)
        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->ds->index != sw_index)
                return dp->ds;

dsa_tree_find():
        if (dst->index == index)

dsa_tree_alloc():
        dst->index = index;

        INIT_LIST_HEAD(&dst->rtable);
        INIT_LIST_HEAD(&dst->ports);

        INIT_LIST_HEAD(&dst->list);
        list_add_tail(&dst->list, &dsa_tree_list);

        kref_init(&dst->refcount);

dsa_tree_free():
        if (dst->tag_ops)
                dsa_tag_driver_put(dst->tag_ops);
        list_del(&dst->list);

dsa_tree_get():
        kref_get(&dst->refcount);

dsa_tree_put():
        kref_put(&dst->refcount, dsa_tree_release);
dsa_tree_find_port_by_node():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->dn == dn)
                        return dp;

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
        struct dsa_switch *ds = dp->ds;

        dst = ds->dst;

        list_for_each_entry(dl, &dst->rtable, list)
                if (dl->dp == dp && dl->link_dp == link_dp)

        dl->dp = dp;
        dl->link_dp = link_dp;

        INIT_LIST_HEAD(&dl->list);
        list_add_tail(&dl->list, &dst->rtable);
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
        struct device_node *dn = dp->dn;

        dl = dsa_link_touch(dp, link_dp);

dsa_tree_setup_routing_table():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_dsa(dp)) {
                        complete = dsa_port_setup_routing_table(dp);

dsa_tree_find_first_cpu():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_cpu(dp))
                        return dp;
dsa_tree_find_first_master():
        ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);

dsa_tree_setup_default_cpu():
        struct dsa_port *cpu_dp, *dp;

        pr_err("DSA: tree %d has no CPU port\n", dst->index);
        return -EINVAL;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->cpu_dp)
                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                        dp->cpu_dp = cpu_dp;

dsa_switch_preferred_default_local_cpu_port():
        if (!ds->ops->preferred_default_local_cpu_port)

        cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);

        if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
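/*
 * Illustration (not from this file): a switch with more than one CPU port
 * can steer the default assignment by implementing the
 * ->preferred_default_local_cpu_port() hook. The port number below is made
 * up; the hook must return one of the switch's own CPU ports, as the
 * WARN_ON() above enforces.
 */
static struct dsa_port *
example_preferred_default_local_cpu_port(struct dsa_switch *ds)        /* hypothetical */
{
        /* Prefer, say, the faster of the two internal CPU ports. */
        return dsa_to_port(ds, 9);
}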
dsa_tree_setup_cpu_ports():
        struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;

        list_for_each_entry(cpu_dp, &dst->ports, list) {
                preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);

                dsa_switch_for_each_port(dp, cpu_dp->ds) {
                        if (dp->cpu_dp)
                        if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                                dp->cpu_dp = cpu_dp;

dsa_tree_teardown_cpu_ports():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
                        dp->cpu_dp = NULL;
static int dsa_port_setup(struct dsa_port *dp)
        struct dsa_switch *ds = dp->ds;

        if (dp->setup)

        err = dsa_port_devlink_setup(dp);

        switch (dp->type) {
                dsa_port_disable(dp);

                if (dp->dn) {
                        err = dsa_shared_port_link_register_of(dp);
                dev_warn(ds->dev,
                         dp->index);
                err = dsa_port_enable(dp, NULL);

                if (dp->dn) {
                        err = dsa_shared_port_link_register_of(dp);
                dev_warn(ds->dev,
                         dp->index);
                err = dsa_port_enable(dp, NULL);

                of_get_mac_address(dp->dn, dp->mac);
                err = dsa_slave_create(dp);

        dsa_port_disable(dp);
        dsa_shared_port_link_unregister_of(dp);
        dsa_port_devlink_teardown(dp);

        dp->setup = true;
static void dsa_port_teardown(struct dsa_port *dp)
        if (!dp->setup)

        switch (dp->type) {
                dsa_port_disable(dp);
                if (dp->dn)
                        dsa_shared_port_link_unregister_of(dp);

                dsa_port_disable(dp);
                if (dp->dn)
                        dsa_shared_port_link_unregister_of(dp);

                if (dp->slave) {
                        dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;

        dsa_port_devlink_teardown(dp);

        dp->setup = false;

static int dsa_port_setup_as_unused(struct dsa_port *dp)
        dp->type = DSA_PORT_TYPE_UNUSED;
        return dsa_port_setup(dp);
dsa_switch_setup_tag_protocol():
        const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
        struct dsa_switch_tree *dst = ds->dst;

        if (tag_ops->proto == dst->default_proto)

        err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
        dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
                tag_ops->name, ERR_PTR(err));

        if (tag_ops->connect) {
                err = tag_ops->connect(ds);

        if (ds->ops->connect_tag_protocol) {
                err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
                dev_err(ds->dev,
                        tag_ops->name, ERR_PTR(err));

        if (tag_ops->disconnect)
                tag_ops->disconnect(ds);

dsa_switch_teardown_tag_protocol():
        const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

        if (tag_ops->disconnect)
                tag_ops->disconnect(ds);
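/*
 * Illustration (not from this file): the tagging protocol is negotiated
 * through two dsa_switch_ops hooks. The function names and the chosen
 * protocol below are only examples; a driver that cannot reconfigure its
 * tagger at runtime simply leaves ->change_tag_protocol unset, and
 * dsa_port_parse_cpu() then rejects a user-requested override.
 */
static enum dsa_tag_protocol
example_get_tag_protocol(struct dsa_switch *ds, int port,
                         enum dsa_tag_protocol mprot)        /* hypothetical */
{
        return DSA_TAG_PROTO_EDSA;        /* the header format this hardware speaks */
}

static int example_change_tag_protocol(struct dsa_switch *ds,
                                       enum dsa_tag_protocol proto)        /* hypothetical */
{
        /* Reprogram the CPU port(s) for the new header format, or refuse
         * formats the hardware does not support.
         */
        return proto == DSA_TAG_PROTO_EDSA ? 0 : -EPROTONOSUPPORT;
}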
dsa_switch_setup():
        if (ds->setup)

        /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
         * driver and before ops->setup() has run, since the switch drivers and
         * ...
         */
        ds->phys_mii_mask |= dsa_user_ports(ds);

        ds->configure_vlan_while_not_filtering = true;

        err = ds->ops->setup(ds);

        if (!ds->slave_mii_bus && ds->ops->phy_read) {
                ds->slave_mii_bus = mdiobus_alloc();
                if (!ds->slave_mii_bus) {
                        err = -ENOMEM;

                dn = of_get_child_by_name(ds->dev->of_node, "mdio");

                err = of_mdiobus_register(ds->slave_mii_bus, dn);

        ds->setup = true;

        if (ds->slave_mii_bus && ds->ops->phy_read)
                mdiobus_free(ds->slave_mii_bus);

        if (ds->ops->teardown)
                ds->ops->teardown(ds);
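/*
 * Illustration (not from this file): when a driver exposes its internal
 * PHYs through ->phy_read()/->phy_write() instead of registering its own
 * MDIO bus, dsa_switch_setup() above allocates and registers
 * ds->slave_mii_bus on its behalf. The hardware-access helpers named here
 * are made up.
 */
static int example_phy_read(struct dsa_switch *ds, int port, int regnum)        /* hypothetical */
{
        /* Read MII register @regnum of the PHY attached to @port. */
        return example_hw_mdio_read(ds->priv, port, regnum);        /* hypothetical helper */
}

static int example_phy_write(struct dsa_switch *ds, int port, int regnum,
                             u16 val)        /* hypothetical */
{
        return example_hw_mdio_write(ds->priv, port, regnum, val);  /* hypothetical helper */
}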
dsa_switch_teardown():
        if (!ds->setup)

        if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
                mdiobus_free(ds->slave_mii_bus);
                ds->slave_mii_bus = NULL;

        if (ds->ops->teardown)
                ds->ops->teardown(ds);

        ds->setup = false;

/* First tear down the non-shared, then the shared ports. This ensures that
 * ...
 */
dsa_tree_teardown_ports():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
                        dsa_port_teardown(dp);

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
                        dsa_port_teardown(dp);

dsa_tree_teardown_switches():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                dsa_switch_teardown(dp->ds);
/* Bring shared ports up first, then non-shared ports */
dsa_tree_setup_ports():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
                        err = dsa_port_setup(dp);

        list_for_each_entry(dp, &dst->ports, list) {
                if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
                        err = dsa_port_setup(dp);
                                err = dsa_port_setup_as_unused(dp);

dsa_tree_setup_switches():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_switch_setup(dp->ds);

dsa_tree_setup_master():
        struct net_device *master = cpu_dp->master;
        bool admin_up = (master->flags & IFF_UP) &&

dsa_tree_teardown_master():
        struct net_device *master = cpu_dp->master;
dsa_tree_setup_lags():
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list) {
                if (dp->ds->num_lag_ids > len)
                        len = dp->ds->num_lag_ids;

        dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
        if (!dst->lags)
                return -ENOMEM;

        dst->lags_len = len;

dsa_tree_teardown_lags():
        kfree(dst->lags);

dsa_tree_setup():
        if (dst->setup) {
                       dst->index);
                return -EEXIST;

        dst->setup = true;

        pr_info("DSA: tree %d setup\n", dst->index);
dsa_tree_teardown():
        if (!dst->setup)

        list_for_each_entry_safe(dl, next, &dst->rtable, list) {
                list_del(&dl->list);

        pr_info("DSA: tree %d torn down\n", dst->index);

        dst->setup = false;

dsa_tree_bind_tag_proto():
        const struct dsa_device_ops *old_tag_ops = dst->tag_ops;

        dst->tag_ops = tag_ops;

        if (err && err != -EOPNOTSUPP)

        dst->tag_ops = old_tag_ops;

dsa_tree_change_tag_proto():
        struct dsa_port *dp;
        int err = -EBUSY;

        dsa_tree_for_each_user_port(dp, dst) {
                if (dsa_port_to_master(dp)->flags & IFF_UP)

                if (dp->slave->flags & IFF_UP)
dsa_tree_master_state_change():
        struct dsa_port *cpu_dp = master->dsa_ptr;

dsa_tree_master_admin_state_change():
        struct dsa_port *cpu_dp = master->dsa_ptr;

            (up && cpu_dp->master_oper_up))

        cpu_dp->master_admin_up = up;

dsa_tree_master_oper_state_change():
        struct dsa_port *cpu_dp = master->dsa_ptr;

            (cpu_dp->master_admin_up && up))

        cpu_dp->master_oper_up = up;
dsa_port_touch():
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_port *dp;

        dsa_switch_for_each_port(dp, ds)
                if (dp->index == index)
                        return dp;

        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (!dp)

        dp->ds = ds;
        dp->index = index;

        mutex_init(&dp->addr_lists_lock);
        mutex_init(&dp->vlans_lock);
        INIT_LIST_HEAD(&dp->fdbs);
        INIT_LIST_HEAD(&dp->mdbs);
        INIT_LIST_HEAD(&dp->vlans);        /* also initializes &dp->user_vlans */
        INIT_LIST_HEAD(&dp->list);
        list_add_tail(&dp->list, &dst->ports);

        return dp;
static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
        dp->type = DSA_PORT_TYPE_USER;
        dp->name = name;

static int dsa_port_parse_dsa(struct dsa_port *dp)
        dp->type = DSA_PORT_TYPE_DSA;

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
        struct dsa_switch *mds, *ds = dp->ds;

        mds = mdp->ds;
        mdp_upstream = dsa_upstream_port(mds, mdp->index);
        tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,

        return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;

        default_proto = dsa_get_tag_protocol(dp, master);
        if (dst->default_proto) {
                if (dst->default_proto != default_proto) {
                        dev_err(ds->dev,
                        return -EINVAL;

        dst->default_proto = default_proto;

        if (!ds->ops->change_tag_protocol) {
                dev_err(ds->dev, "Tag protocol cannot be modified\n");
                return -EINVAL;

        dev_warn(ds->dev,

        if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
                return -EPROBE_DEFER;

        dev_warn(ds->dev, "No tagger for this switch\n");

        if (dst->tag_ops) {
                if (dst->tag_ops != tag_ops) {
                        dev_err(ds->dev,
                        return -EINVAL;

        /* ... protocol is still reference-counted only per switch tree. */
        dst->tag_ops = tag_ops;

        dp->master = master;
        dp->type = DSA_PORT_TYPE_CPU;
        dsa_port_set_tag_protocol(dp, dst->tag_ops);
        dp->dst = dst;
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
        dp->dn = dn;

                return -EPROBE_DEFER;

        user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
        return dsa_port_parse_cpu(dp, master, user_protocol);

        return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);
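/*
 * Illustration (not from this file): the properties read above correspond
 * to a device tree fragment roughly like the one below. Node names, the
 * phandles (&eth0, &switch1port6) and the "dsa-tag-protocol" value are
 * made-up examples.
 *
 *      ports {
 *              port@0 {
 *                      reg = <0>;
 *                      label = "lan1";                 // -> dsa_port_parse_user()
 *              };
 *              port@5 {
 *                      reg = <5>;
 *                      ethernet = <&eth0>;             // -> dsa_port_parse_cpu()
 *                      dsa-tag-protocol = "edsa";      // optional tagger override
 *              };
 *              port@6 {
 *                      reg = <6>;
 *                      link = <&switch1port6>;         // -> dsa_port_parse_dsa()
 *              };
 *      };
 */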
dsa_switch_parse_ports_of():
        struct dsa_port *dp;

        /* The second possibility is "ethernet-ports" */
        ports = of_get_child_by_name(dn, "ethernet-ports");
                dev_err(ds->dev, "no ports child node found\n");
                return -EINVAL;

        if (reg >= ds->num_ports) {
                dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
                        port, reg, ds->num_ports);
                err = -EINVAL;

        dp = dsa_to_port(ds, reg);

        err = dsa_port_parse_of(dp, port);
dsa_switch_parse_member_of():
        if (sz < 0 && sz != -EINVAL)

        ds->index = m[1];

        ds->dst = dsa_tree_touch(m[0]);
        if (!ds->dst)
                return -ENOMEM;

        if (dsa_switch_find(ds->dst->index, ds->index)) {
                dev_err(ds->dev,
                        ds->index, ds->dst->index);
                return -EEXIST;

        if (ds->dst->last_switch < ds->index)
                ds->dst->last_switch = ds->index;

dsa_switch_touch_ports():
        struct dsa_port *dp;

        for (port = 0; port < ds->num_ports; port++) {
                dp = dsa_port_touch(ds, port);
                if (!dp)
                        return -ENOMEM;
dev_is_class():
        if (dev->class != NULL && !strcmp(dev->class->name, class))

static int dsa_port_parse(struct dsa_port *dp, const char *name,
                return -EPROBE_DEFER;

                return dsa_port_parse_cpu(dp, master, NULL);

                return dsa_port_parse_dsa(dp);

        return dsa_port_parse_user(dp, name);

dsa_switch_parse_ports():
        struct dsa_port *dp;

        name = cd->port_names[i];
        dev = cd->netdev[i];
        dp = dsa_to_port(ds, i);

        err = dsa_port_parse(dp, name, dev);

        return -EINVAL;
dsa_switch_parse():
        ds->cd = cd;

        ds->index = 0;
        ds->dst = dsa_tree_touch(0);
        if (!ds->dst)
                return -ENOMEM;

dsa_switch_release_ports():
        struct dsa_port *dp, *next;

        dsa_switch_for_each_port_safe(dp, next, ds) {
                WARN_ON(!list_empty(&dp->fdbs));
                WARN_ON(!list_empty(&dp->mdbs));
                WARN_ON(!list_empty(&dp->vlans));
                list_del(&dp->list);
                kfree(dp);
dsa_switch_probe():
        if (!ds->dev)
                return -ENODEV;

        pdata = ds->dev->platform_data;
        np = ds->dev->of_node;

        if (!ds->num_ports)
                return -EINVAL;

        err = -ENODEV;

        dst = ds->dst;

dsa_register_switch():
        dsa_tree_put(ds->dst);
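/*
 * Illustration (not from this file): the checks in dsa_switch_probe() are
 * what a switch driver must satisfy before calling dsa_register_switch().
 * The driver function name, the port count and example_switch_ops are
 * made up.
 */
static int example_switch_probe(struct mdio_device *mdiodev)        /* hypothetical */
{
        struct dsa_switch *ds;

        ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return -ENOMEM;

        ds->dev = &mdiodev->dev;        /* required, see dsa_switch_probe() */
        ds->num_ports = 7;              /* required to be non-zero */
        ds->ops = &example_switch_ops;  /* driver's dsa_switch_ops, hypothetical */

        return dsa_register_switch(ds);
}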
dsa_switch_remove():
        struct dsa_switch_tree *dst = ds->dst;

dsa_switch_shutdown():
        struct dsa_port *dp;

        if (!ds->setup)

        dsa_switch_for_each_user_port(dp, ds) {
                master = dsa_port_to_master(dp);
                slave_dev = dp->slave;

        dsa_switch_for_each_cpu_port(dp, ds)
                dp->master->dsa_ptr = NULL;

static bool dsa_port_is_initialized(const struct dsa_port *dp)
        return dp->type == DSA_PORT_TYPE_USER && dp->slave;
dsa_switch_suspend():
        struct dsa_port *dp;

        dsa_switch_for_each_port(dp, ds) {
                if (!dsa_port_is_initialized(dp))

                ret = dsa_slave_suspend(dp->slave);

        if (ds->ops->suspend)
                ret = ds->ops->suspend(ds);

dsa_switch_resume():
        struct dsa_port *dp;

        if (ds->ops->resume)
                ret = ds->ops->resume(ds);

        dsa_switch_for_each_port(dp, ds) {
                if (!dsa_port_is_initialized(dp))

                ret = dsa_slave_resume(dp->slave);

dsa_port_from_netdev():
        return ERR_PTR(-ENODEV);

dsa_db_equal():
        if (a->type != b->type)

        switch (a->type) {
                return a->dp == b->dp;
                return a->lag.dev == b->lag.dev;
                return a->bridge.num == b->bridge.num;
dsa_fdb_present_in_other_db():
        struct dsa_port *dp = dsa_to_port(ds, port);

        lockdep_assert_held(&dp->addr_lists_lock);

        list_for_each_entry(a, &dp->fdbs, list) {
                if (!ether_addr_equal(a->addr, addr) || a->vid != vid)

                if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))

dsa_mdb_present_in_other_db():
        struct dsa_port *dp = dsa_to_port(ds, port);

        lockdep_assert_held(&dp->addr_lists_lock);

        list_for_each_entry(a, &dp->mdbs, list) {
                if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)

                if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))

dsa_init_module():
        return -ENOMEM;