slave.c (0fdc50dfab47d525b71a9f0d8310746cdc0c09c5) slave.c (54a0ed0df49609f4e3f098f8943e38e389dc2e15)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/dsa/slave.c - Slave device handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 */
6
7#include <linux/list.h>
8#include <linux/etherdevice.h>

--- 300 unchanged lines hidden (view full) ---

309{
310 struct dsa_port *dp = dsa_slave_to_port(dev);
311 struct switchdev_obj_port_vlan vlan;
312 int err;
313
314 if (obj->orig_dev != dev)
315 return -EOPNOTSUPP;
316
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/dsa/slave.c - Slave device handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 */
6
7#include <linux/list.h>
8#include <linux/etherdevice.h>

--- 300 unchanged lines hidden (view full) ---

309{
310 struct dsa_port *dp = dsa_slave_to_port(dev);
311 struct switchdev_obj_port_vlan vlan;
312 int err;
313
314 if (obj->orig_dev != dev)
315 return -EOPNOTSUPP;
316
317 if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
317 if (dsa_port_skip_vlan_configuration(dp))
318 return 0;
319
320 vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
321
322 err = dsa_port_vlan_add(dp, &vlan, trans);
323 if (err)
324 return err;
325

--- 50 unchanged lines hidden (view full) ---

376static int dsa_slave_vlan_del(struct net_device *dev,
377 const struct switchdev_obj *obj)
378{
379 struct dsa_port *dp = dsa_slave_to_port(dev);
380
381 if (obj->orig_dev != dev)
382 return -EOPNOTSUPP;
383
318 return 0;
319
320 vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
321
322 err = dsa_port_vlan_add(dp, &vlan, trans);
323 if (err)
324 return err;
325

--- 50 unchanged lines hidden (view full) ---

376static int dsa_slave_vlan_del(struct net_device *dev,
377 const struct switchdev_obj *obj)
378{
379 struct dsa_port *dp = dsa_slave_to_port(dev);
380
381 if (obj->orig_dev != dev)
382 return -EOPNOTSUPP;
383
384 if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
384 if (dsa_port_skip_vlan_configuration(dp))
385 return 0;
386
387 /* Do not deprogram the CPU port as it may be shared with other user
388 * ports which can be members of this VLAN as well.
389 */
390 return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
391}
392

--- 47 unchanged lines hidden (view full) ---

440}
441
442static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
443 struct sk_buff *skb)
444{
445#ifdef CONFIG_NET_POLL_CONTROLLER
446 struct dsa_slave_priv *p = netdev_priv(dev);
447
385 return 0;
386
387 /* Do not deprogram the CPU port as it may be shared with other user
388 * ports which can be members of this VLAN as well.
389 */
390 return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
391}
392

--- 47 unchanged lines hidden (view full) ---

440}
441
442static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
443 struct sk_buff *skb)
444{
445#ifdef CONFIG_NET_POLL_CONTROLLER
446 struct dsa_slave_priv *p = netdev_priv(dev);
447
448 if (p->netpoll)
449 netpoll_send_skb(p->netpoll, skb);
448 return netpoll_send_skb(p->netpoll, skb);
450#else
451 BUG();
449#else
450 BUG();
452#endif
453 return NETDEV_TX_OK;
451 return NETDEV_TX_OK;
452#endif
454}
455
456static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
457 struct sk_buff *skb)
458{
459 struct dsa_switch *ds = p->dp->ds;
460 struct sk_buff *clone;
461 unsigned int type;

--- 375 unchanged lines hidden (view full) ---

837
838 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
839 if (mall_tc_entry->cookie == cookie)
840 return mall_tc_entry;
841
842 return NULL;
843}
844
453}
454
455static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
456 struct sk_buff *skb)
457{
458 struct dsa_switch *ds = p->dp->ds;
459 struct sk_buff *clone;
460 unsigned int type;

--- 375 unchanged lines hidden (view full) ---

836
837 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
838 if (mall_tc_entry->cookie == cookie)
839 return mall_tc_entry;
840
841 return NULL;
842}
843
845static int dsa_slave_add_cls_matchall(struct net_device *dev,
846 struct tc_cls_matchall_offload *cls,
847 bool ingress)
844static int
845dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
846 struct tc_cls_matchall_offload *cls,
847 bool ingress)
848{
849 struct dsa_port *dp = dsa_slave_to_port(dev);
850 struct dsa_slave_priv *p = netdev_priv(dev);
848{
849 struct dsa_port *dp = dsa_slave_to_port(dev);
850 struct dsa_slave_priv *p = netdev_priv(dev);
851 struct dsa_mall_mirror_tc_entry *mirror;
851 struct dsa_mall_tc_entry *mall_tc_entry;
852 struct dsa_mall_tc_entry *mall_tc_entry;
852 __be16 protocol = cls->common.protocol;
853 struct dsa_switch *ds = dp->ds;
854 struct flow_action_entry *act;
855 struct dsa_port *to_dp;
853 struct dsa_switch *ds = dp->ds;
854 struct flow_action_entry *act;
855 struct dsa_port *to_dp;
856 int err = -EOPNOTSUPP;
856 int err;
857
858 if (!ds->ops->port_mirror_add)
857
858 if (!ds->ops->port_mirror_add)
859 return err;
859 return -EOPNOTSUPP;
860
860
861 if (!flow_offload_has_one_action(&cls->rule->action))
862 return err;
861 if (!flow_action_basic_hw_stats_check(&cls->rule->action,
862 cls->common.extack))
863 return -EOPNOTSUPP;
863
864 act = &cls->rule->action.entries[0];
865
864
865 act = &cls->rule->action.entries[0];
866
866 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
867 struct dsa_mall_mirror_tc_entry *mirror;
867 if (!act->dev)
868 return -EINVAL;
868
869
869 if (!act->dev)
870 return -EINVAL;
870 if (!dsa_slave_dev_check(act->dev))
871 return -EOPNOTSUPP;
871
872
872 if (!dsa_slave_dev_check(act->dev))
873 return -EOPNOTSUPP;
873 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
874 if (!mall_tc_entry)
875 return -ENOMEM;
874
876
875 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
876 if (!mall_tc_entry)
877 return -ENOMEM;
877 mall_tc_entry->cookie = cls->cookie;
878 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
879 mirror = &mall_tc_entry->mirror;
878
880
879 mall_tc_entry->cookie = cls->cookie;
880 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
881 mirror = &mall_tc_entry->mirror;
881 to_dp = dsa_slave_to_port(act->dev);
882
882
883 to_dp = dsa_slave_to_port(act->dev);
883 mirror->to_local_port = to_dp->index;
884 mirror->ingress = ingress;
884
885
885 mirror->to_local_port = to_dp->index;
886 mirror->ingress = ingress;
886 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
887 if (err) {
888 kfree(mall_tc_entry);
889 return err;
890 }
887
891
888 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
889 if (err) {
890 kfree(mall_tc_entry);
891 return err;
892 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
893
894 return err;
895}
896
897static int
898dsa_slave_add_cls_matchall_police(struct net_device *dev,
899 struct tc_cls_matchall_offload *cls,
900 bool ingress)
901{
902 struct netlink_ext_ack *extack = cls->common.extack;
903 struct dsa_port *dp = dsa_slave_to_port(dev);
904 struct dsa_slave_priv *p = netdev_priv(dev);
905 struct dsa_mall_policer_tc_entry *policer;
906 struct dsa_mall_tc_entry *mall_tc_entry;
907 struct dsa_switch *ds = dp->ds;
908 struct flow_action_entry *act;
909 int err;
910
911 if (!ds->ops->port_policer_add) {
912 NL_SET_ERR_MSG_MOD(extack,
913 "Policing offload not implemented");
914 return -EOPNOTSUPP;
915 }
916
917 if (!ingress) {
918 NL_SET_ERR_MSG_MOD(extack,
919 "Only supported on ingress qdisc");
920 return -EOPNOTSUPP;
921 }
922
923 if (!flow_action_basic_hw_stats_check(&cls->rule->action,
924 cls->common.extack))
925 return -EOPNOTSUPP;
926
927 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
928 if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
929 NL_SET_ERR_MSG_MOD(extack,
930 "Only one port policer allowed");
931 return -EEXIST;
892 }
932 }
933 }
893
934
894 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
935 act = &cls->rule->action.entries[0];
936
937 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
938 if (!mall_tc_entry)
939 return -ENOMEM;
940
941 mall_tc_entry->cookie = cls->cookie;
942 mall_tc_entry->type = DSA_PORT_MALL_POLICER;
943 policer = &mall_tc_entry->policer;
944 policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
945 policer->burst = act->police.burst;
946
947 err = ds->ops->port_policer_add(ds, dp->index, policer);
948 if (err) {
949 kfree(mall_tc_entry);
950 return err;
895 }
896
951 }
952
897 return 0;
953 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
954
955 return err;
898}
899
956}
957
958static int dsa_slave_add_cls_matchall(struct net_device *dev,
959 struct tc_cls_matchall_offload *cls,
960 bool ingress)
961{
962 int err = -EOPNOTSUPP;
963
964 if (cls->common.protocol == htons(ETH_P_ALL) &&
965 flow_offload_has_one_action(&cls->rule->action) &&
966 cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
967 err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
968 else if (flow_offload_has_one_action(&cls->rule->action) &&
969 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
970 err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
971
972 return err;
973}
974
900static void dsa_slave_del_cls_matchall(struct net_device *dev,
901 struct tc_cls_matchall_offload *cls)
902{
903 struct dsa_port *dp = dsa_slave_to_port(dev);
904 struct dsa_mall_tc_entry *mall_tc_entry;
905 struct dsa_switch *ds = dp->ds;
906
975static void dsa_slave_del_cls_matchall(struct net_device *dev,
976 struct tc_cls_matchall_offload *cls)
977{
978 struct dsa_port *dp = dsa_slave_to_port(dev);
979 struct dsa_mall_tc_entry *mall_tc_entry;
980 struct dsa_switch *ds = dp->ds;
981
907 if (!ds->ops->port_mirror_del)
908 return;
909
910 mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
911 if (!mall_tc_entry)
912 return;
913
914 list_del(&mall_tc_entry->list);
915
916 switch (mall_tc_entry->type) {
917 case DSA_PORT_MALL_MIRROR:
982 mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
983 if (!mall_tc_entry)
984 return;
985
986 list_del(&mall_tc_entry->list);
987
988 switch (mall_tc_entry->type) {
989 case DSA_PORT_MALL_MIRROR:
918 ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
990 if (ds->ops->port_mirror_del)
991 ds->ops->port_mirror_del(ds, dp->index,
992 &mall_tc_entry->mirror);
919 break;
993 break;
994 case DSA_PORT_MALL_POLICER:
995 if (ds->ops->port_policer_del)
996 ds->ops->port_policer_del(ds, dp->index);
997 break;
920 default:
921 WARN_ON(1);
922 }
923
924 kfree(mall_tc_entry);
925}
926
927static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,

--- 9 unchanged lines hidden (view full) ---

937 case TC_CLSMATCHALL_DESTROY:
938 dsa_slave_del_cls_matchall(dev, cls);
939 return 0;
940 default:
941 return -EOPNOTSUPP;
942 }
943}
944
998 default:
999 WARN_ON(1);
1000 }
1001
1002 kfree(mall_tc_entry);
1003}
1004
1005static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,

--- 9 unchanged lines hidden (view full) ---

1015 case TC_CLSMATCHALL_DESTROY:
1016 dsa_slave_del_cls_matchall(dev, cls);
1017 return 0;
1018 default:
1019 return -EOPNOTSUPP;
1020 }
1021}
1022
1023static int dsa_slave_add_cls_flower(struct net_device *dev,
1024 struct flow_cls_offload *cls,
1025 bool ingress)
1026{
1027 struct dsa_port *dp = dsa_slave_to_port(dev);
1028 struct dsa_switch *ds = dp->ds;
1029 int port = dp->index;
1030
1031 if (!ds->ops->cls_flower_add)
1032 return -EOPNOTSUPP;
1033
1034 return ds->ops->cls_flower_add(ds, port, cls, ingress);
1035}
1036
1037static int dsa_slave_del_cls_flower(struct net_device *dev,
1038 struct flow_cls_offload *cls,
1039 bool ingress)
1040{
1041 struct dsa_port *dp = dsa_slave_to_port(dev);
1042 struct dsa_switch *ds = dp->ds;
1043 int port = dp->index;
1044
1045 if (!ds->ops->cls_flower_del)
1046 return -EOPNOTSUPP;
1047
1048 return ds->ops->cls_flower_del(ds, port, cls, ingress);
1049}
1050
1051static int dsa_slave_stats_cls_flower(struct net_device *dev,
1052 struct flow_cls_offload *cls,
1053 bool ingress)
1054{
1055 struct dsa_port *dp = dsa_slave_to_port(dev);
1056 struct dsa_switch *ds = dp->ds;
1057 int port = dp->index;
1058
1059 if (!ds->ops->cls_flower_stats)
1060 return -EOPNOTSUPP;
1061
1062 return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1063}
1064
1065static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1066 struct flow_cls_offload *cls,
1067 bool ingress)
1068{
1069 switch (cls->command) {
1070 case FLOW_CLS_REPLACE:
1071 return dsa_slave_add_cls_flower(dev, cls, ingress);
1072 case FLOW_CLS_DESTROY:
1073 return dsa_slave_del_cls_flower(dev, cls, ingress);
1074 case FLOW_CLS_STATS:
1075 return dsa_slave_stats_cls_flower(dev, cls, ingress);
1076 default:
1077 return -EOPNOTSUPP;
1078 }
1079}
1080
945static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
946 void *cb_priv, bool ingress)
947{
948 struct net_device *dev = cb_priv;
949
950 if (!tc_can_offload(dev))
951 return -EOPNOTSUPP;
952
953 switch (type) {
954 case TC_SETUP_CLSMATCHALL:
955 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1081static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1082 void *cb_priv, bool ingress)
1083{
1084 struct net_device *dev = cb_priv;
1085
1086 if (!tc_can_offload(dev))
1087 return -EOPNOTSUPP;
1088
1089 switch (type) {
1090 case TC_SETUP_CLSMATCHALL:
1091 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1092 case TC_SETUP_CLSFLOWER:
1093 return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
956 default:
957 return -EOPNOTSUPP;
958 }
959}
960
961static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
962 void *type_data, void *cb_priv)
963{

--- 133 unchanged lines hidden (view full) ---

1097 struct dsa_port *dp = dsa_slave_to_port(dev);
1098 struct bridge_vlan_info info;
1099 int ret;
1100
1101 /* Check for a possible bridge VLAN entry now since there is no
1102 * need to emulate the switchdev prepare + commit phase.
1103 */
1104 if (dp->bridge_dev) {
1094 default:
1095 return -EOPNOTSUPP;
1096 }
1097}
1098
1099static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1100 void *type_data, void *cb_priv)
1101{

--- 133 unchanged lines hidden (view full) ---

1235 struct dsa_port *dp = dsa_slave_to_port(dev);
1236 struct bridge_vlan_info info;
1237 int ret;
1238
1239 /* Check for a possible bridge VLAN entry now since there is no
1240 * need to emulate the switchdev prepare + commit phase.
1241 */
1242 if (dp->bridge_dev) {
1105 if (!br_vlan_enabled(dp->bridge_dev))
1243 if (dsa_port_skip_vlan_configuration(dp))
1106 return 0;
1107
1108 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1109 * device, respectively the VID is not found, returning
1110 * 0 means success, which is a failure for us here.
1111 */
1112 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1113 if (ret == 0)

--- 17 unchanged lines hidden (view full) ---

1131 struct dsa_port *dp = dsa_slave_to_port(dev);
1132 struct bridge_vlan_info info;
1133 int ret;
1134
1135 /* Check for a possible bridge VLAN entry now since there is no
1136 * need to emulate the switchdev prepare + commit phase.
1137 */
1138 if (dp->bridge_dev) {
1244 return 0;
1245
1246 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1247 * device, respectively the VID is not found, returning
1248 * 0 means success, which is a failure for us here.
1249 */
1250 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1251 if (ret == 0)

--- 17 unchanged lines hidden (view full) ---

1269 struct dsa_port *dp = dsa_slave_to_port(dev);
1270 struct bridge_vlan_info info;
1271 int ret;
1272
1273 /* Check for a possible bridge VLAN entry now since there is no
1274 * need to emulate the switchdev prepare + commit phase.
1275 */
1276 if (dp->bridge_dev) {
1139 if (!br_vlan_enabled(dp->bridge_dev))
1277 if (dsa_port_skip_vlan_configuration(dp))
1140 return 0;
1141
1142 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1143 * device, respectively the VID is not found, returning
1144 * 0 means success, which is a failure for us here.
1145 */
1146 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1147 if (ret == 0)
1148 return -EBUSY;
1149 }
1150
1151 /* Do not deprogram the CPU port as it may be shared with other user
1152 * ports which can be members of this VLAN as well.
1153 */
1154 return dsa_port_vid_del(dp, vid);
1155}
1156
1278 return 0;
1279
1280 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1281 * device, respectively the VID is not found, returning
1282 * 0 means success, which is a failure for us here.
1283 */
1284 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1285 if (ret == 0)
1286 return -EBUSY;
1287 }
1288
1289 /* Do not deprogram the CPU port as it may be shared with other user
1290 * ports which can be members of this VLAN as well.
1291 */
1292 return dsa_port_vid_del(dp, vid);
1293}
1294
/* Snapshot of one slave netdev collected while walking a hardware
 * bridge; used by dsa_bridge_mtu_normalization() so that MTU changes
 * made through dsa_hw_port_list_set_mtu() can be rolled back.
 */
1295struct dsa_hw_port {
1296 struct list_head list; /* linkage into the local hw_port_list */
1297 struct net_device *dev; /* slave device whose MTU may be changed */
1298 int old_mtu; /* MTU before the change, kept for rollback */
1299};
1300
/* Apply @mtu to every device on @hw_port_list. If any dev_set_mtu()
 * call fails, walk back over the already-visited entries and restore
 * each device's recorded old_mtu, then return the error. Returns 0 on
 * success.
 */
1301static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1302{
1303 const struct dsa_hw_port *p;
1304 int err;
1305
1306 list_for_each_entry(p, hw_port_list, list) {
 /* Skip devices already at the target MTU. */
1307 if (p->dev->mtu == mtu)
1308 continue;
1309
1310 err = dev_set_mtu(p->dev, mtu);
1311 if (err)
1312 goto rollback;
1313 }
1314
1315 return 0;
1316
1317rollback:
 /* Undo only the entries before the one that failed; "continue"
 * starts from the current position of @p, in reverse.
 */
1318 list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1319 if (p->dev->mtu == p->old_mtu)
1320 continue;
1321
 /* Best effort: a failed restore is only logged. */
1322 if (dev_set_mtu(p->dev, p->old_mtu))
1323 netdev_err(p->dev, "Failed to restore MTU\n");
1324 }
1325
1326 return err;
1327}
1328
1329static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1330{
1331 struct dsa_hw_port *p, *n;
1332
1333 list_for_each_entry_safe(p, n, hw_port_list, list)
1334 kfree(p);
1335}
1336
1337/* Make the hardware datapath to/from @dev limited to a common MTU */
1338static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
1339{
1340 struct list_head hw_port_list;
1341 struct dsa_switch_tree *dst;
1342 int min_mtu = ETH_MAX_MTU;
1343 struct dsa_port *other_dp;
1344 int err;
1345
 /* Nothing to do unless the switch asks for MTU enforcement and
 * the port is actually bridged.
 */
1346 if (!dp->ds->mtu_enforcement_ingress)
1347 return;
1348
1349 if (!dp->bridge_dev)
1350 return;
1351
1352 INIT_LIST_HEAD(&hw_port_list);
1353
1354 /* Populate the list of ports that are part of the same bridge
1355 * as the newly added/modified port
1356 */
1357 list_for_each_entry(dst, &dsa_tree_list, list) {
1358 list_for_each_entry(other_dp, &dst->ports, list) {
1359 struct dsa_hw_port *hw_port;
1360 struct net_device *slave;
1361
 /* Only user ports on the same bridge, and only on
 * switches that also enforce MTU on ingress.
 */
1362 if (other_dp->type != DSA_PORT_TYPE_USER)
1363 continue;
1364
1365 if (other_dp->bridge_dev != dp->bridge_dev)
1366 continue;
1367
1368 if (!other_dp->ds->mtu_enforcement_ingress)
1369 continue;
1370
1371 slave = other_dp->slave;
1372
 /* Track the smallest current MTU as the fallback target. */
1373 if (min_mtu > slave->mtu)
1374 min_mtu = slave->mtu;
1375
1376 hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
 /* On allocation failure, abandon normalization and just
 * free what was collected so far.
 */
1377 if (!hw_port)
1378 goto out;
1379
1380 hw_port->dev = slave;
1381 hw_port->old_mtu = slave->mtu;
1382
1383 list_add(&hw_port->list, &hw_port_list);
1384 }
1385 }
1386
1387 /* Attempt to configure the entire hardware bridge to the newly added
1388 * interface's MTU first, regardless of whether the intention of the
1389 * user was to raise or lower it.
1390 */
1391 err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
1392 if (!err)
1393 goto out;
1394
1395 /* Clearly that didn't work out so well, so just set the minimum MTU on
1396 * all hardware bridge ports now. If this fails too, then all ports will
1397 * still have their old MTU rolled back anyway.
1398 */
1399 dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
1400
1401out:
1402 dsa_hw_port_list_free(&hw_port_list);
1403}
1404
/* ndo_change_mtu for DSA slave netdevs. Computes the largest MTU over
 * all user ports of this switch (pretending @new_mtu is already set on
 * @dev), sizes the DSA master accordingly (largest MTU plus tagging
 * overhead), then applies @new_mtu to the port itself. On failure each
 * already-applied step is rolled back in reverse order.
 * Returns 0, -EOPNOTSUPP if the driver cannot change MTUs, -ERANGE if
 * the required master MTU exceeds the master's or our own max_mtu, or
 * the first error from the applied steps.
 */
1405static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
1406{
1407 struct net_device *master = dsa_slave_to_master(dev);
1408 struct dsa_port *dp = dsa_slave_to_port(dev);
1409 struct dsa_slave_priv *p = netdev_priv(dev);
1410 struct dsa_switch *ds = p->dp->ds;
1411 struct dsa_port *cpu_dp;
1412 int port = p->dp->index;
1413 int largest_mtu = 0;
1414 int new_master_mtu;
1415 int old_master_mtu;
1416 int mtu_limit;
1417 int cpu_mtu;
1418 int err, i;
1419
1420 if (!ds->ops->port_change_mtu)
1421 return -EOPNOTSUPP;
1422
 /* Find the largest MTU across all user ports of this switch. */
1423 for (i = 0; i < ds->num_ports; i++) {
1424 int slave_mtu;
1425
1426 if (!dsa_is_user_port(ds, i))
1427 continue;
1428
1429 /* During probe, this function will be called for each slave
1430 * device, while not all of them have been allocated. That's
1431 * ok, it doesn't change what the maximum is, so ignore it.
1432 */
1433 if (!dsa_to_port(ds, i)->slave)
1434 continue;
1435
1436 /* Pretend that we already applied the setting, which we
1437 * actually haven't (still haven't done all integrity checks)
1438 */
1439 if (i == port)
1440 slave_mtu = new_mtu;
1441 else
1442 slave_mtu = dsa_to_port(ds, i)->slave->mtu;
1443
1444 if (largest_mtu < slave_mtu)
1445 largest_mtu = slave_mtu;
1446 }
1447
1448 cpu_dp = dsa_to_port(ds, port)->cpu_dp;
1449
 /* The master must fit the largest slave MTU plus tag overhead,
 * bounded by both the master's and our own max_mtu.
 */
1450 mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
1451 old_master_mtu = master->mtu;
1452 new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
1453 if (new_master_mtu > mtu_limit)
1454 return -ERANGE;
1455
1456 /* If the master MTU isn't over limit, there's no need to check the CPU
1457 * MTU, since that surely isn't either.
1458 */
1459 cpu_mtu = largest_mtu;
1460
1461 /* Start applying stuff */
1462 if (new_master_mtu != old_master_mtu) {
1463 err = dev_set_mtu(master, new_master_mtu);
1464 if (err < 0)
1465 goto out_master_failed;
1466
1467 /* We only need to propagate the MTU of the CPU port to
1468 * upstream switches.
1469 */
1470 err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
1471 if (err)
1472 goto out_cpu_failed;
1473 }
1474
1475 err = dsa_port_mtu_change(dp, new_mtu, false);
1476 if (err)
1477 goto out_port_failed;
1478
1479 dev->mtu = new_mtu;
1480
1481 dsa_bridge_mtu_normalization(dp);
1482
1483 return 0;
1484
 /* Unwind in reverse order of application; rollback errors are
 * ignored here.
 */
1485out_port_failed:
1486 if (new_master_mtu != old_master_mtu)
1487 dsa_port_mtu_change(cpu_dp, old_master_mtu -
1488 cpu_dp->tag_ops->overhead,
1489 true);
1490out_cpu_failed:
1491 if (new_master_mtu != old_master_mtu)
1492 dev_set_mtu(master, old_master_mtu);
1493out_master_failed:
1494 return err;
1495}
1496
1157static const struct ethtool_ops dsa_slave_ethtool_ops = {
1158 .get_drvinfo = dsa_slave_get_drvinfo,
1159 .get_regs_len = dsa_slave_get_regs_len,
1160 .get_regs = dsa_slave_get_regs,
1161 .nway_reset = dsa_slave_nway_reset,
1162 .get_link = ethtool_op_get_link,
1163 .get_eeprom_len = dsa_slave_get_eeprom_len,
1164 .get_eeprom = dsa_slave_get_eeprom,

--- 61 unchanged lines hidden (view full) ---

1226#endif
1227 .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
1228 .ndo_setup_tc = dsa_slave_setup_tc,
1229 .ndo_get_stats64 = dsa_slave_get_stats64,
1230 .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
1231 .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
1232 .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
1233 .ndo_get_devlink_port = dsa_slave_get_devlink_port,
1497static const struct ethtool_ops dsa_slave_ethtool_ops = {
1498 .get_drvinfo = dsa_slave_get_drvinfo,
1499 .get_regs_len = dsa_slave_get_regs_len,
1500 .get_regs = dsa_slave_get_regs,
1501 .nway_reset = dsa_slave_nway_reset,
1502 .get_link = ethtool_op_get_link,
1503 .get_eeprom_len = dsa_slave_get_eeprom_len,
1504 .get_eeprom = dsa_slave_get_eeprom,

--- 61 unchanged lines hidden (view full) ---

1566#endif
1567 .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
1568 .ndo_setup_tc = dsa_slave_setup_tc,
1569 .ndo_get_stats64 = dsa_slave_get_stats64,
1570 .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
1571 .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
1572 .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
1573 .ndo_get_devlink_port = dsa_slave_get_devlink_port,
1574 .ndo_change_mtu = dsa_slave_change_mtu,
1234};
1235
1236static struct device_type dsa_type = {
1237 .name = "dsa",
1238};
1239
1240void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1241{
1242 const struct dsa_port *dp = dsa_to_port(ds, port);
1243
1575};
1576
1577static struct device_type dsa_type = {
1578 .name = "dsa",
1579};
1580
1581void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1582{
1583 const struct dsa_port *dp = dsa_to_port(ds, port);
1584
1244 phylink_mac_change(dp->pl, up);
1585 if (dp->pl)
1586 phylink_mac_change(dp->pl, up);
1245}
1246EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1247
1587}
1588EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1589
1248static void dsa_slave_phylink_fixed_state(struct net_device *dev,
1590static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
1249 struct phylink_link_state *state)
1250{
1591 struct phylink_link_state *state)
1592{
1251 struct dsa_port *dp = dsa_slave_to_port(dev);
1593 struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
1252 struct dsa_switch *ds = dp->ds;
1253
1254 /* No need to check that this operation is valid, the callback would
1255 * not be called if it was not.
1256 */
1257 ds->ops->phylink_fixed_state(ds, dp->index, state);
1258}
1259

--- 23 unchanged lines hidden (view full) ---

1283
1284 ret = of_get_phy_mode(port_dn, &mode);
1285 if (ret)
1286 mode = PHY_INTERFACE_MODE_NA;
1287
1288 dp->pl_config.dev = &slave_dev->dev;
1289 dp->pl_config.type = PHYLINK_NETDEV;
1290
1594 struct dsa_switch *ds = dp->ds;
1595
1596 /* No need to check that this operation is valid, the callback would
1597 * not be called if it was not.
1598 */
1599 ds->ops->phylink_fixed_state(ds, dp->index, state);
1600}
1601

--- 23 unchanged lines hidden (view full) ---

1625
1626 ret = of_get_phy_mode(port_dn, &mode);
1627 if (ret)
1628 mode = PHY_INTERFACE_MODE_NA;
1629
1630 dp->pl_config.dev = &slave_dev->dev;
1631 dp->pl_config.type = PHYLINK_NETDEV;
1632
1633 /* The get_fixed_state callback takes precedence over polling the
1634 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
1635 * this if the switch provides such a callback.
1636 */
1637 if (ds->ops->phylink_fixed_state) {
1638 dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
1639 dp->pl_config.poll_fixed_state = true;
1640 }
1641
1291 dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1292 &dsa_port_phylink_mac_ops);
1293 if (IS_ERR(dp->pl)) {
1294 netdev_err(slave_dev,
1295 "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1296 return PTR_ERR(dp->pl);
1297 }
1298
1642 dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1643 &dsa_port_phylink_mac_ops);
1644 if (IS_ERR(dp->pl)) {
1645 netdev_err(slave_dev,
1646 "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1647 return PTR_ERR(dp->pl);
1648 }
1649
1299 /* Register only if the switch provides such a callback, since this
1300 * callback takes precedence over polling the link GPIO in PHYLINK
1301 * (see phylink_get_fixed_state).
1302 */
1303 if (ds->ops->phylink_fixed_state)
1304 phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);
1305
1306 if (ds->ops->get_phy_flags)
1307 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1308
1309 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1310 if (ret == -ENODEV && ds->slave_mii_bus) {
1311 /* We could not connect to a designated PHY or SFP, so try to
1312 * use the switch internal MDIO bus instead
1313 */

--- 5 unchanged lines hidden (view full) ---

1319 phylink_destroy(dp->pl);
1320 return ret;
1321 }
1322 }
1323
1324 return ret;
1325}
1326
1650 if (ds->ops->get_phy_flags)
1651 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1652
1653 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1654 if (ret == -ENODEV && ds->slave_mii_bus) {
1655 /* We could not connect to a designated PHY or SFP, so try to
1656 * use the switch internal MDIO bus instead
1657 */

--- 5 unchanged lines hidden (view full) ---

1663 phylink_destroy(dp->pl);
1664 return ret;
1665 }
1666 }
1667
1668 return ret;
1669}
1670
1671static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1672static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1673 struct netdev_queue *txq,
1674 void *_unused)
1675{
1676 lockdep_set_class(&txq->_xmit_lock,
1677 &dsa_slave_netdev_xmit_lock_key);
1678}
1679
1327int dsa_slave_suspend(struct net_device *slave_dev)
1328{
1329 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1330
1331 if (!netif_running(slave_dev))
1332 return 0;
1333
1334 netif_device_detach(slave_dev);

--- 61 unchanged lines hidden (view full) ---

1396 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1397 if (!IS_ERR_OR_NULL(port->mac))
1398 ether_addr_copy(slave_dev->dev_addr, port->mac);
1399 else
1400 eth_hw_addr_inherit(slave_dev, master);
1401 slave_dev->priv_flags |= IFF_NO_QUEUE;
1402 slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1403 slave_dev->min_mtu = 0;
1680int dsa_slave_suspend(struct net_device *slave_dev)
1681{
1682 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1683
1684 if (!netif_running(slave_dev))
1685 return 0;
1686
1687 netif_device_detach(slave_dev);

--- 61 unchanged lines hidden (view full) ---

1749 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1750 if (!IS_ERR_OR_NULL(port->mac))
1751 ether_addr_copy(slave_dev->dev_addr, port->mac);
1752 else
1753 eth_hw_addr_inherit(slave_dev, master);
1754 slave_dev->priv_flags |= IFF_NO_QUEUE;
1755 slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1756 slave_dev->min_mtu = 0;
1404 slave_dev->max_mtu = ETH_MAX_MTU;
1757 if (ds->ops->port_max_mtu)
1758 slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
1759 else
1760 slave_dev->max_mtu = ETH_MAX_MTU;
1405 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1406
1761 SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1762
1763 netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1764 NULL);
1765
1407 SET_NETDEV_DEV(slave_dev, port->ds->dev);
1408 slave_dev->dev.of_node = port->dn;
1409 slave_dev->vlan_features = master->vlan_features;
1410
1411 p = netdev_priv(slave_dev);
1412 p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1413 if (!p->stats64) {
1414 free_netdev(slave_dev);
1415 return -ENOMEM;
1416 }
1766 SET_NETDEV_DEV(slave_dev, port->ds->dev);
1767 slave_dev->dev.of_node = port->dn;
1768 slave_dev->vlan_features = master->vlan_features;
1769
1770 p = netdev_priv(slave_dev);
1771 p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1772 if (!p->stats64) {
1773 free_netdev(slave_dev);
1774 return -ENOMEM;
1775 }
1776
1777 ret = gro_cells_init(&p->gcells, slave_dev);
1778 if (ret)
1779 goto out_free;
1780
1417 p->dp = port;
1418 INIT_LIST_HEAD(&p->mall_tc_list);
1419 p->xmit = cpu_dp->tag_ops->xmit;
1420 port->slave = slave_dev;
1421
1781 p->dp = port;
1782 INIT_LIST_HEAD(&p->mall_tc_list);
1783 p->xmit = cpu_dp->tag_ops->xmit;
1784 port->slave = slave_dev;
1785
1786 rtnl_lock();
1787 ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
1788 rtnl_unlock();
1789 if (ret)
1790 dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
1791 ret, port->index);
1792
1422 netif_carrier_off(slave_dev);
1423
1424 ret = dsa_slave_phy_setup(slave_dev);
1425 if (ret) {
1426 netdev_err(master, "error %d setting up slave phy\n", ret);
1793 netif_carrier_off(slave_dev);
1794
1795 ret = dsa_slave_phy_setup(slave_dev);
1796 if (ret) {
1797 netdev_err(master, "error %d setting up slave phy\n", ret);
1427 goto out_free;
1798 goto out_gcells;
1428 }
1429
1430 dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1431
1432 ret = register_netdev(slave_dev);
1433 if (ret) {
1434 netdev_err(master, "error %d registering interface %s\n",
1435 ret, slave_dev->name);
1436 goto out_phy;
1437 }
1438
1439 return 0;
1440
1441out_phy:
1442 rtnl_lock();
1443 phylink_disconnect_phy(p->dp->pl);
1444 rtnl_unlock();
1445 phylink_destroy(p->dp->pl);
1799 }
1800
1801 dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1802
1803 ret = register_netdev(slave_dev);
1804 if (ret) {
1805 netdev_err(master, "error %d registering interface %s\n",
1806 ret, slave_dev->name);
1807 goto out_phy;
1808 }
1809
1810 return 0;
1811
1812out_phy:
1813 rtnl_lock();
1814 phylink_disconnect_phy(p->dp->pl);
1815 rtnl_unlock();
1816 phylink_destroy(p->dp->pl);
1817out_gcells:
1818 gro_cells_destroy(&p->gcells);
1446out_free:
1447 free_percpu(p->stats64);
1448 free_netdev(slave_dev);
1449 port->slave = NULL;
1450 return ret;
1451}
1452
1453void dsa_slave_destroy(struct net_device *slave_dev)

--- 4 unchanged lines hidden (view full) ---

1458 netif_carrier_off(slave_dev);
1459 rtnl_lock();
1460 phylink_disconnect_phy(dp->pl);
1461 rtnl_unlock();
1462
1463 dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1464 unregister_netdev(slave_dev);
1465 phylink_destroy(dp->pl);
1819out_free:
1820 free_percpu(p->stats64);
1821 free_netdev(slave_dev);
1822 port->slave = NULL;
1823 return ret;
1824}
1825
1826void dsa_slave_destroy(struct net_device *slave_dev)

--- 4 unchanged lines hidden (view full) ---

1831 netif_carrier_off(slave_dev);
1832 rtnl_lock();
1833 phylink_disconnect_phy(dp->pl);
1834 rtnl_unlock();
1835
1836 dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1837 unregister_netdev(slave_dev);
1838 phylink_destroy(dp->pl);
1839 gro_cells_destroy(&p->gcells);
1466 free_percpu(p->stats64);
1467 free_netdev(slave_dev);
1468}
1469
/* Identify a DSA slave interface by its netdev_ops pointer. */
1470bool dsa_slave_dev_check(const struct net_device *dev)
1471{
1472 return dev->netdev_ops == &dsa_slave_netdev_ops;
1473}
1474
1475static int dsa_slave_changeupper(struct net_device *dev,
1476 struct netdev_notifier_changeupper_info *info)
1477{
1478 struct dsa_port *dp = dsa_slave_to_port(dev);
1479 int err = NOTIFY_DONE;
1480
1481 if (netif_is_bridge_master(info->upper_dev)) {
1482 if (info->linking) {
1483 err = dsa_port_bridge_join(dp, info->upper_dev);
1840 free_percpu(p->stats64);
1841 free_netdev(slave_dev);
1842}
1843
1844bool dsa_slave_dev_check(const struct net_device *dev)
1845{
1846 return dev->netdev_ops == &dsa_slave_netdev_ops;
1847}
1848
1849static int dsa_slave_changeupper(struct net_device *dev,
1850 struct netdev_notifier_changeupper_info *info)
1851{
1852 struct dsa_port *dp = dsa_slave_to_port(dev);
1853 int err = NOTIFY_DONE;
1854
1855 if (netif_is_bridge_master(info->upper_dev)) {
1856 if (info->linking) {
1857 err = dsa_port_bridge_join(dp, info->upper_dev);
1858 if (!err)
1859 dsa_bridge_mtu_normalization(dp);
1484 err = notifier_from_errno(err);
1485 } else {
1486 dsa_port_bridge_leave(dp, info->upper_dev);
1487 err = NOTIFY_OK;
1488 }
1489 }
1490
1491 return err;

--- 248 unchanged lines hidden ---
1860 err = notifier_from_errno(err);
1861 } else {
1862 dsa_port_bridge_leave(dp, info->upper_dev);
1863 err = NOTIFY_OK;
1864 }
1865 }
1866
1867 return err;

--- 248 unchanged lines hidden ---