--- net/dsa/slave.c (3c22baeab40b2f8e75907cfd7aa69147d5343d2c)
+++ net/dsa/slave.c (e13c2075280e5b25118d3330752b47f919d6545e)
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/dsa/slave.c - Slave device handling
  * Copyright (c) 2008-2009 Marvell Semiconductor
  */

 #include <linux/list.h>
 #include <linux/etherdevice.h>
--- 828 unchanged lines hidden ---

         list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
                 if (mall_tc_entry->cookie == cookie)
                         return mall_tc_entry;

         return NULL;
 }

-static int dsa_slave_add_cls_matchall(struct net_device *dev,
-                                      struct tc_cls_matchall_offload *cls,
-                                      bool ingress)
+static int
+dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
+                                  struct tc_cls_matchall_offload *cls,
+                                  bool ingress)
 {
         struct dsa_port *dp = dsa_slave_to_port(dev);
         struct dsa_slave_priv *p = netdev_priv(dev);
+        struct dsa_mall_mirror_tc_entry *mirror;
         struct dsa_mall_tc_entry *mall_tc_entry;
-        __be16 protocol = cls->common.protocol;
         struct dsa_switch *ds = dp->ds;
         struct flow_action_entry *act;
         struct dsa_port *to_dp;
-        int err = -EOPNOTSUPP;
+        int err;

+        act = &cls->rule->action.entries[0];
+
         if (!ds->ops->port_mirror_add)
                 return err;

-        if (!flow_offload_has_one_action(&cls->rule->action))
+        if (!act->dev)
+                return -EINVAL;
+
+        if (!flow_action_basic_hw_stats_check(&cls->rule->action,
+                                              cls->common.extack))
                 return err;

         act = &cls->rule->action.entries[0];

-        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
-                struct dsa_mall_mirror_tc_entry *mirror;
-
-                if (!act->dev)
-                        return -EINVAL;
-
-                if (!dsa_slave_dev_check(act->dev))
-                        return -EOPNOTSUPP;
-
-                mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
-                if (!mall_tc_entry)
-                        return -ENOMEM;
-
-                mall_tc_entry->cookie = cls->cookie;
-                mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
-                mirror = &mall_tc_entry->mirror;
-
-                to_dp = dsa_slave_to_port(act->dev);
-
-                mirror->to_local_port = to_dp->index;
-                mirror->ingress = ingress;
-
-                err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
-                if (err) {
-                        kfree(mall_tc_entry);
-                        return err;
-                }
-
-                list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
-        }
-
-        return 0;
+        if (!dsa_slave_dev_check(act->dev))
+                return -EOPNOTSUPP;
+
+        mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+        if (!mall_tc_entry)
+                return -ENOMEM;
+
+        mall_tc_entry->cookie = cls->cookie;
+        mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
+        mirror = &mall_tc_entry->mirror;
+
+        to_dp = dsa_slave_to_port(act->dev);
+
+        mirror->to_local_port = to_dp->index;
+        mirror->ingress = ingress;
+
+        err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
+        if (err) {
+                kfree(mall_tc_entry);
+                return err;
+        }
+
+        list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+
+        return err;
+}
+
+static int dsa_slave_add_cls_matchall(struct net_device *dev,
+                                      struct tc_cls_matchall_offload *cls,
+                                      bool ingress)
+{
+        int err = -EOPNOTSUPP;
+
+        if (cls->common.protocol == htons(ETH_P_ALL) &&
+            flow_offload_has_one_action(&cls->rule->action) &&
+            cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
+                err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
+
+        return err;
 }

 static void dsa_slave_del_cls_matchall(struct net_device *dev,
                                        struct tc_cls_matchall_offload *cls)
 {
         struct dsa_port *dp = dsa_slave_to_port(dev);
         struct dsa_mall_tc_entry *mall_tc_entry;
         struct dsa_switch *ds = dp->ds;
--- 31 unchanged lines hidden ---
         case TC_CLSMATCHALL_DESTROY:
                 dsa_slave_del_cls_matchall(dev, cls);
                 return 0;
         default:
                 return -EOPNOTSUPP;
         }
 }

+static int dsa_slave_add_cls_flower(struct net_device *dev,
+                                    struct flow_cls_offload *cls,
+                                    bool ingress)
+{
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
+        int port = dp->index;
+
+        if (!ds->ops->cls_flower_add)
+                return -EOPNOTSUPP;
+
+        return ds->ops->cls_flower_add(ds, port, cls, ingress);
+}
+
+static int dsa_slave_del_cls_flower(struct net_device *dev,
+                                    struct flow_cls_offload *cls,
+                                    bool ingress)
+{
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
+        int port = dp->index;
+
+        if (!ds->ops->cls_flower_del)
+                return -EOPNOTSUPP;
+
+        return ds->ops->cls_flower_del(ds, port, cls, ingress);
+}
+
+static int dsa_slave_stats_cls_flower(struct net_device *dev,
+                                      struct flow_cls_offload *cls,
+                                      bool ingress)
+{
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_switch *ds = dp->ds;
+        int port = dp->index;
+
+        if (!ds->ops->cls_flower_stats)
+                return -EOPNOTSUPP;
+
+        return ds->ops->cls_flower_stats(ds, port, cls, ingress);
+}
+
+static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
+                                         struct flow_cls_offload *cls,
+                                         bool ingress)
+{
+        switch (cls->command) {
+        case FLOW_CLS_REPLACE:
+                return dsa_slave_add_cls_flower(dev, cls, ingress);
+        case FLOW_CLS_DESTROY:
+                return dsa_slave_del_cls_flower(dev, cls, ingress);
+        case FLOW_CLS_STATS:
+                return dsa_slave_stats_cls_flower(dev, cls, ingress);
+        default:
+                return -EOPNOTSUPP;
+        }
+}
+
 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                        void *cb_priv, bool ingress)
 {
         struct net_device *dev = cb_priv;

         if (!tc_can_offload(dev))
                 return -EOPNOTSUPP;

         switch (type) {
         case TC_SETUP_CLSMATCHALL:
                 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
+        case TC_SETUP_CLSFLOWER:
+                return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
         default:
                 return -EOPNOTSUPP;
         }
 }

 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
                                           void *type_data, void *cb_priv)
 {
--- 185 unchanged lines hidden ---
         }

         /* Do not deprogram the CPU port as it may be shared with other user
          * ports which can be members of this VLAN as well.
          */
         return dsa_port_vid_del(dp, vid);
 }

+struct dsa_hw_port {
+        struct list_head list;
+        struct net_device *dev;
+        int old_mtu;
+};
+
+static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
+{
+        const struct dsa_hw_port *p;
+        int err;
+
+        list_for_each_entry(p, hw_port_list, list) {
+                if (p->dev->mtu == mtu)
+                        continue;
+
+                err = dev_set_mtu(p->dev, mtu);
+                if (err)
+                        goto rollback;
+        }
+
+        return 0;
+
+rollback:
+        list_for_each_entry_continue_reverse(p, hw_port_list, list) {
+                if (p->dev->mtu == p->old_mtu)
+                        continue;
+
+                if (dev_set_mtu(p->dev, p->old_mtu))
+                        netdev_err(p->dev, "Failed to restore MTU\n");
+        }
+
+        return err;
+}
+
+static void dsa_hw_port_list_free(struct list_head *hw_port_list)
+{
+        struct dsa_hw_port *p, *n;
+
+        list_for_each_entry_safe(p, n, hw_port_list, list)
+                kfree(p);
+}
+
+/* Make the hardware datapath to/from @dev limited to a common MTU */
+void dsa_bridge_mtu_normalization(struct dsa_port *dp)
+{
+        struct list_head hw_port_list;
+        struct dsa_switch_tree *dst;
+        int min_mtu = ETH_MAX_MTU;
+        struct dsa_port *other_dp;
+        int err;
+
+        if (!dp->ds->mtu_enforcement_ingress)
+                return;
+
+        if (!dp->bridge_dev)
+                return;
+
+        INIT_LIST_HEAD(&hw_port_list);
+
+        /* Populate the list of ports that are part of the same bridge
+         * as the newly added/modified port
+         */
+        list_for_each_entry(dst, &dsa_tree_list, list) {
+                list_for_each_entry(other_dp, &dst->ports, list) {
+                        struct dsa_hw_port *hw_port;
+                        struct net_device *slave;
+
+                        if (other_dp->type != DSA_PORT_TYPE_USER)
+                                continue;
+
+                        if (other_dp->bridge_dev != dp->bridge_dev)
+                                continue;
+
+                        if (!other_dp->ds->mtu_enforcement_ingress)
+                                continue;
+
+                        slave = other_dp->slave;
+
+                        if (min_mtu > slave->mtu)
+                                min_mtu = slave->mtu;
+
+                        hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
+                        if (!hw_port)
+                                goto out;
+
+                        hw_port->dev = slave;
+                        hw_port->old_mtu = slave->mtu;
+
+                        list_add(&hw_port->list, &hw_port_list);
+                }
+        }
+
+        /* Attempt to configure the entire hardware bridge to the newly added
+         * interface's MTU first, regardless of whether the intention of the
+         * user was to raise or lower it.
+         */
+        err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
+        if (!err)
+                goto out;
+
+        /* Clearly that didn't work out so well, so just set the minimum MTU on
+         * all hardware bridge ports now. If this fails too, then all ports will
+         * still have their old MTU rolled back anyway.
+         */
+        dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
+
+out:
+        dsa_hw_port_list_free(&hw_port_list);
+}
+
+static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+{
+        struct net_device *master = dsa_slave_to_master(dev);
+        struct dsa_port *dp = dsa_slave_to_port(dev);
+        struct dsa_slave_priv *p = netdev_priv(dev);
+        struct dsa_switch *ds = p->dp->ds;
+        struct dsa_port *cpu_dp;
+        int port = p->dp->index;
+        int largest_mtu = 0;
+        int new_master_mtu;
+        int old_master_mtu;
+        int mtu_limit;
+        int cpu_mtu;
+        int err, i;
+
+        if (!ds->ops->port_change_mtu)
+                return -EOPNOTSUPP;
+
+        for (i = 0; i < ds->num_ports; i++) {
+                int slave_mtu;
+
+                if (!dsa_is_user_port(ds, i))
+                        continue;
+
+                /* During probe, this function will be called for each slave
+                 * device, while not all of them have been allocated. That's
+                 * ok, it doesn't change what the maximum is, so ignore it.
+                 */
+                if (!dsa_to_port(ds, i)->slave)
+                        continue;
+
+                /* Pretend that we already applied the setting, which we
+                 * actually haven't (still haven't done all integrity checks)
+                 */
+                if (i == port)
+                        slave_mtu = new_mtu;
+                else
+                        slave_mtu = dsa_to_port(ds, i)->slave->mtu;
+
+                if (largest_mtu < slave_mtu)
+                        largest_mtu = slave_mtu;
+        }
+
+        cpu_dp = dsa_to_port(ds, port)->cpu_dp;
+
+        mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+        old_master_mtu = master->mtu;
+        new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
+        if (new_master_mtu > mtu_limit)
+                return -ERANGE;
+
+        /* If the master MTU isn't over limit, there's no need to check the CPU
+         * MTU, since that surely isn't either.
+         */
+        cpu_mtu = largest_mtu;
+
+        /* Start applying stuff */
+        if (new_master_mtu != old_master_mtu) {
+                err = dev_set_mtu(master, new_master_mtu);
+                if (err < 0)
+                        goto out_master_failed;
+
+                /* We only need to propagate the MTU of the CPU port to
+                 * upstream switches.
+                 */
+                err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
+                if (err)
+                        goto out_cpu_failed;
+        }
+
+        err = dsa_port_mtu_change(dp, new_mtu, false);
+        if (err)
+                goto out_port_failed;
+
+        dev->mtu = new_mtu;
+
+        dsa_bridge_mtu_normalization(dp);
+
+        return 0;
+
+out_port_failed:
+        if (new_master_mtu != old_master_mtu)
+                dsa_port_mtu_change(cpu_dp, old_master_mtu -
+                                    cpu_dp->tag_ops->overhead,
+                                    true);
+out_cpu_failed:
+        if (new_master_mtu != old_master_mtu)
+                dev_set_mtu(master, old_master_mtu);
+out_master_failed:
+        return err;
+}
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
         .get_drvinfo            = dsa_slave_get_drvinfo,
         .get_regs_len           = dsa_slave_get_regs_len,
         .get_regs               = dsa_slave_get_regs,
         .nway_reset             = dsa_slave_nway_reset,
         .get_link               = ethtool_op_get_link,
         .get_eeprom_len         = dsa_slave_get_eeprom_len,
         .get_eeprom             = dsa_slave_get_eeprom,
--- 61 unchanged lines hidden ---
 #endif
         .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
         .ndo_setup_tc           = dsa_slave_setup_tc,
         .ndo_get_stats64        = dsa_slave_get_stats64,
         .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
         .ndo_vlan_rx_add_vid    = dsa_slave_vlan_rx_add_vid,
         .ndo_vlan_rx_kill_vid   = dsa_slave_vlan_rx_kill_vid,
         .ndo_get_devlink_port   = dsa_slave_get_devlink_port,
+        .ndo_change_mtu         = dsa_slave_change_mtu,
 };

 static struct device_type dsa_type = {
         .name   = "dsa",
 };

 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
 {
--- 154 unchanged lines hidden ---
         slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
         if (!IS_ERR_OR_NULL(port->mac))
                 ether_addr_copy(slave_dev->dev_addr, port->mac);
         else
                 eth_hw_addr_inherit(slave_dev, master);
         slave_dev->priv_flags |= IFF_NO_QUEUE;
         slave_dev->netdev_ops = &dsa_slave_netdev_ops;
         slave_dev->min_mtu = 0;
-        slave_dev->max_mtu = ETH_MAX_MTU;
+        if (ds->ops->port_max_mtu)
+                slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+        else
+                slave_dev->max_mtu = ETH_MAX_MTU;
         SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

         SET_NETDEV_DEV(slave_dev, port->ds->dev);
         slave_dev->dev.of_node = port->dn;
         slave_dev->vlan_features = master->vlan_features;

         p = netdev_priv(slave_dev);
         p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
         if (!p->stats64) {
                 free_netdev(slave_dev);
                 return -ENOMEM;
         }
         p->dp = port;
         INIT_LIST_HEAD(&p->mall_tc_list);
         p->xmit = cpu_dp->tag_ops->xmit;
         port->slave = slave_dev;

+        rtnl_lock();
+        ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+        rtnl_unlock();
+        if (ret && ret != -EOPNOTSUPP) {
+                dev_err(ds->dev, "error %d setting MTU on port %d\n",
+                        ret, port->index);
+                goto out_free;
+        }
+
         netif_carrier_off(slave_dev);

         ret = dsa_slave_phy_setup(slave_dev);
         if (ret) {
                 netdev_err(master, "error %d setting up slave phy\n", ret);
                 goto out_free;
         }

--- 46 unchanged lines hidden ---
                                  struct netdev_notifier_changeupper_info *info)
 {
         struct dsa_port *dp = dsa_slave_to_port(dev);
         int err = NOTIFY_DONE;

         if (netif_is_bridge_master(info->upper_dev)) {
                 if (info->linking) {
                         err = dsa_port_bridge_join(dp, info->upper_dev);
+                        if (!err)
+                                dsa_bridge_mtu_normalization(dp);
                         err = notifier_from_errno(err);
                 } else {
                         dsa_port_bridge_leave(dp, info->upper_dev);
                         err = NOTIFY_OK;
                 }
         }

         return err;
--- 248 unchanged lines hidden ---