rtnetlink.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2) rtnetlink.c (d1fdd9138682e0f272beee0cb08b6328c5478b26)
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Routing netlink socket interface: protocol independent part.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>

--- 57 unchanged lines hidden (view full) ---

66static DEFINE_MUTEX(rtnl_mutex);
67
68void rtnl_lock(void)
69{
70 mutex_lock(&rtnl_mutex);
71}
72EXPORT_SYMBOL(rtnl_lock);
73
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Routing netlink socket interface: protocol independent part.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>

--- 57 unchanged lines hidden (view full) ---

/* Global mutex serialising all rtnetlink configuration operations. */
static DEFINE_MUTEX(rtnl_mutex);

/* Acquire the rtnetlink mutex; paired with rtnl_unlock(). */
void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
73
74static struct sk_buff *defer_kfree_skb_list;
75void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
76{
77 if (head && tail) {
78 tail->next = defer_kfree_skb_list;
79 defer_kfree_skb_list = head;
80 }
81}
82EXPORT_SYMBOL(rtnl_kfree_skbs);
83
74void __rtnl_unlock(void)
75{
84void __rtnl_unlock(void)
85{
86 struct sk_buff *head = defer_kfree_skb_list;
87
88 defer_kfree_skb_list = NULL;
89
76 mutex_unlock(&rtnl_mutex);
90 mutex_unlock(&rtnl_mutex);
91
92 while (head) {
93 struct sk_buff *next = head->next;
94
95 kfree_skb(head);
96 cond_resched();
97 head = next;
98 }
77}
78
79void rtnl_unlock(void)
80{
81 /* This fellow will unlock it for us. */
82 netdev_run_todo();
83}
84EXPORT_SYMBOL(rtnl_unlock);

--- 718 unchanged lines hidden (view full) ---

803 a->tx_window_errors = b->tx_window_errors;
804
805 a->rx_compressed = b->rx_compressed;
806 a->tx_compressed = b->tx_compressed;
807
808 a->rx_nohandler = b->rx_nohandler;
809}
810
99}
100
/* Release the rtnl lock indirectly: netdev_run_todo() drops
 * rtnl_mutex on our behalf (see the comment below).
 */
void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

--- 718 unchanged lines hidden (view full) ---

825 a->tx_window_errors = b->tx_window_errors;
826
827 a->rx_compressed = b->rx_compressed;
828 a->tx_compressed = b->tx_compressed;
829
830 a->rx_nohandler = b->rx_nohandler;
831}
832
/* Raw copy of a struct rtnl_link_stats64 into attribute payload @v.
 * The destination is opaque netlink payload, hence void * and memcpy.
 */
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
	memcpy(v, b, sizeof(*b));
}
815
816/* All VF info */
817static inline int rtnl_vfinfo_size(const struct net_device *dev,
818 u32 ext_filter_mask)
819{
820 if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
821 (ext_filter_mask & RTEXT_FILTER_VF)) {
822 int num_vfs = dev_num_vf(dev->dev.parent);
823 size_t size = nla_total_size(sizeof(struct nlattr));
824 size += nla_total_size(num_vfs * sizeof(struct nlattr));
825 size += num_vfs *
826 (nla_total_size(sizeof(struct ifla_vf_mac)) +
827 nla_total_size(sizeof(struct ifla_vf_vlan)) +
828 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
829 nla_total_size(sizeof(struct ifla_vf_rate)) +
830 nla_total_size(sizeof(struct ifla_vf_link_state)) +
831 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
832 /* IFLA_VF_STATS_RX_PACKETS */
833/* All VF info */
834static inline int rtnl_vfinfo_size(const struct net_device *dev,
835 u32 ext_filter_mask)
836{
837 if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
838 (ext_filter_mask & RTEXT_FILTER_VF)) {
839 int num_vfs = dev_num_vf(dev->dev.parent);
840 size_t size = nla_total_size(sizeof(struct nlattr));
841 size += nla_total_size(num_vfs * sizeof(struct nlattr));
842 size += num_vfs *
843 (nla_total_size(sizeof(struct ifla_vf_mac)) +
844 nla_total_size(sizeof(struct ifla_vf_vlan)) +
845 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
846 nla_total_size(sizeof(struct ifla_vf_rate)) +
847 nla_total_size(sizeof(struct ifla_vf_link_state)) +
848 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
849 /* IFLA_VF_STATS_RX_PACKETS */
833 nla_total_size(sizeof(__u64)) +
850 nla_total_size_64bit(sizeof(__u64)) +
834 /* IFLA_VF_STATS_TX_PACKETS */
851 /* IFLA_VF_STATS_TX_PACKETS */
835 nla_total_size(sizeof(__u64)) +
852 nla_total_size_64bit(sizeof(__u64)) +
836 /* IFLA_VF_STATS_RX_BYTES */
853 /* IFLA_VF_STATS_RX_BYTES */
837 nla_total_size(sizeof(__u64)) +
854 nla_total_size_64bit(sizeof(__u64)) +
838 /* IFLA_VF_STATS_TX_BYTES */
855 /* IFLA_VF_STATS_TX_BYTES */
839 nla_total_size(sizeof(__u64)) +
856 nla_total_size_64bit(sizeof(__u64)) +
840 /* IFLA_VF_STATS_BROADCAST */
857 /* IFLA_VF_STATS_BROADCAST */
841 nla_total_size(sizeof(__u64)) +
858 nla_total_size_64bit(sizeof(__u64)) +
842 /* IFLA_VF_STATS_MULTICAST */
859 /* IFLA_VF_STATS_MULTICAST */
843 nla_total_size(sizeof(__u64)) +
860 nla_total_size_64bit(sizeof(__u64)) +
844 nla_total_size(sizeof(struct ifla_vf_trust)));
845 return size;
846 } else
847 return 0;
848}
849
850static size_t rtnl_port_size(const struct net_device *dev,
851 u32 ext_filter_mask)

--- 17 unchanged lines hidden (view full) ---

869 return 0;
870 if (dev_num_vf(dev->dev.parent))
871 return port_self_size + vf_ports_size +
872 vf_port_size * dev_num_vf(dev->dev.parent);
873 else
874 return port_self_size;
875}
876
861 nla_total_size(sizeof(struct ifla_vf_trust)));
862 return size;
863 } else
864 return 0;
865}
866
867static size_t rtnl_port_size(const struct net_device *dev,
868 u32 ext_filter_mask)

--- 17 unchanged lines hidden (view full) ---

886 return 0;
887 if (dev_num_vf(dev->dev.parent))
888 return port_self_size + vf_ports_size +
889 vf_port_size * dev_num_vf(dev->dev.parent);
890 else
891 return port_self_size;
892}
893
894static size_t rtnl_xdp_size(const struct net_device *dev)
895{
896 size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */
897
898 if (!dev->netdev_ops->ndo_xdp)
899 return 0;
900 else
901 return xdp_size;
902}
903
877static noinline size_t if_nlmsg_size(const struct net_device *dev,
878 u32 ext_filter_mask)
879{
880 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
881 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
882 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
883 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
904static noinline size_t if_nlmsg_size(const struct net_device *dev,
905 u32 ext_filter_mask)
906{
907 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
908 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
909 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
910 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
884 + nla_total_size(sizeof(struct rtnl_link_ifmap))
911 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
885 + nla_total_size(sizeof(struct rtnl_link_stats))
912 + nla_total_size(sizeof(struct rtnl_link_stats))
886 + nla_total_size(sizeof(struct rtnl_link_stats64))
913 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
887 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
888 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
889 + nla_total_size(4) /* IFLA_TXQLEN */
890 + nla_total_size(4) /* IFLA_WEIGHT */
891 + nla_total_size(4) /* IFLA_MTU */
892 + nla_total_size(4) /* IFLA_LINK */
893 + nla_total_size(4) /* IFLA_MASTER */
894 + nla_total_size(1) /* IFLA_CARRIER */

--- 10 unchanged lines hidden (view full) ---

905 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
906 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
907 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
908 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
909 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
910 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
911 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
912 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
914 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
915 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
916 + nla_total_size(4) /* IFLA_TXQLEN */
917 + nla_total_size(4) /* IFLA_WEIGHT */
918 + nla_total_size(4) /* IFLA_MTU */
919 + nla_total_size(4) /* IFLA_LINK */
920 + nla_total_size(4) /* IFLA_MASTER */
921 + nla_total_size(1) /* IFLA_CARRIER */

--- 10 unchanged lines hidden (view full) ---

932 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
933 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
934 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
935 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
936 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
937 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
938 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
939 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
940 + rtnl_xdp_size(dev) /* IFLA_XDP */
913 + nla_total_size(1); /* IFLA_PROTO_DOWN */
914
915}
916
917static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
918{
919 struct nlattr *vf_ports;
920 struct nlattr *vf_port;

--- 128 unchanged lines hidden (view full) ---

1049 return -EMSGSIZE;
1050
1051 return 0;
1052}
1053
1054static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1055 struct net_device *dev)
1056{
941 + nla_total_size(1); /* IFLA_PROTO_DOWN */
942
943}
944
945static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
946{
947 struct nlattr *vf_ports;
948 struct nlattr *vf_port;

--- 128 unchanged lines hidden (view full) ---

1077 return -EMSGSIZE;
1078
1079 return 0;
1080}
1081
1082static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1083 struct net_device *dev)
1084{
1057 const struct rtnl_link_stats64 *stats;
1058 struct rtnl_link_stats64 temp;
1085 struct rtnl_link_stats64 *sp;
1059 struct nlattr *attr;
1060
1086 struct nlattr *attr;
1087
1061 stats = dev_get_stats(dev, &temp);
1062
1063 attr = nla_reserve(skb, IFLA_STATS,
1064 sizeof(struct rtnl_link_stats));
1088 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1089 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1065 if (!attr)
1066 return -EMSGSIZE;
1067
1090 if (!attr)
1091 return -EMSGSIZE;
1092
1068 copy_rtnl_link_stats(nla_data(attr), stats);
1093 sp = nla_data(attr);
1094 dev_get_stats(dev, sp);
1069
1095
1070 attr = nla_reserve(skb, IFLA_STATS64,
1071 sizeof(struct rtnl_link_stats64));
1096 attr = nla_reserve(skb, IFLA_STATS,
1097 sizeof(struct rtnl_link_stats));
1072 if (!attr)
1073 return -EMSGSIZE;
1074
1098 if (!attr)
1099 return -EMSGSIZE;
1100
1075 copy_rtnl_link_stats64(nla_data(attr), stats);
1101 copy_rtnl_link_stats(nla_data(attr), sp);
1076
1077 return 0;
1078}
1079
1080static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1081 struct net_device *dev,
1082 int vfs_num,
1083 struct nlattr *vfinfo)

--- 71 unchanged lines hidden (view full) ---

1155 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1156 &vf_stats);
1157 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1158 if (!vfstats) {
1159 nla_nest_cancel(skb, vf);
1160 nla_nest_cancel(skb, vfinfo);
1161 return -EMSGSIZE;
1162 }
1102
1103 return 0;
1104}
1105
1106static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1107 struct net_device *dev,
1108 int vfs_num,
1109 struct nlattr *vfinfo)

--- 71 unchanged lines hidden (view full) ---

1181 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1182 &vf_stats);
1183 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1184 if (!vfstats) {
1185 nla_nest_cancel(skb, vf);
1186 nla_nest_cancel(skb, vfinfo);
1187 return -EMSGSIZE;
1188 }
1163 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1164 vf_stats.rx_packets) ||
1165 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1166 vf_stats.tx_packets) ||
1167 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1168 vf_stats.rx_bytes) ||
1169 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1170 vf_stats.tx_bytes) ||
1171 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1172 vf_stats.broadcast) ||
1173 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1174 vf_stats.multicast))
1189 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1190 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1191 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1192 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1193 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1194 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1195 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1196 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1197 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1198 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1199 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1200 vf_stats.multicast, IFLA_VF_STATS_PAD))
1175 return -EMSGSIZE;
1176 nla_nest_end(skb, vfstats);
1177 nla_nest_end(skb, vf);
1178 return 0;
1179}
1180
1181static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1182{
1183 struct rtnl_link_ifmap map;
1184
1185 memset(&map, 0, sizeof(map));
1186 map.mem_start = dev->mem_start;
1187 map.mem_end = dev->mem_end;
1188 map.base_addr = dev->base_addr;
1189 map.irq = dev->irq;
1190 map.dma = dev->dma;
1191 map.port = dev->if_port;
1192
1201 return -EMSGSIZE;
1202 nla_nest_end(skb, vfstats);
1203 nla_nest_end(skb, vf);
1204 return 0;
1205}
1206
1207static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1208{
1209 struct rtnl_link_ifmap map;
1210
1211 memset(&map, 0, sizeof(map));
1212 map.mem_start = dev->mem_start;
1213 map.mem_end = dev->mem_end;
1214 map.base_addr = dev->base_addr;
1215 map.irq = dev->irq;
1216 map.dma = dev->dma;
1217 map.port = dev->if_port;
1218
1193 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1219 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1194 return -EMSGSIZE;
1195
1196 return 0;
1197}
1198
1220 return -EMSGSIZE;
1221
1222 return 0;
1223}
1224
1225static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1226{
1227 struct netdev_xdp xdp_op = {};
1228 struct nlattr *xdp;
1229 int err;
1230
1231 if (!dev->netdev_ops->ndo_xdp)
1232 return 0;
1233 xdp = nla_nest_start(skb, IFLA_XDP);
1234 if (!xdp)
1235 return -EMSGSIZE;
1236 xdp_op.command = XDP_QUERY_PROG;
1237 err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
1238 if (err)
1239 goto err_cancel;
1240 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, xdp_op.prog_attached);
1241 if (err)
1242 goto err_cancel;
1243
1244 nla_nest_end(skb, xdp);
1245 return 0;
1246
1247err_cancel:
1248 nla_nest_cancel(skb, xdp);
1249 return err;
1250}
1251
1199static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1200 int type, u32 pid, u32 seq, u32 change,
1201 unsigned int flags, u32 ext_filter_mask)
1202{
1203 struct ifinfomsg *ifm;
1204 struct nlmsghdr *nlh;
1205 struct nlattr *af_spec;
1206 struct rtnl_af_ops *af_ops;

--- 80 unchanged lines hidden (view full) ---

1287 }
1288
1289 nla_nest_end(skb, vfinfo);
1290 }
1291
1292 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1293 goto nla_put_failure;
1294
1252static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1253 int type, u32 pid, u32 seq, u32 change,
1254 unsigned int flags, u32 ext_filter_mask)
1255{
1256 struct ifinfomsg *ifm;
1257 struct nlmsghdr *nlh;
1258 struct nlattr *af_spec;
1259 struct rtnl_af_ops *af_ops;

--- 80 unchanged lines hidden (view full) ---

1340 }
1341
1342 nla_nest_end(skb, vfinfo);
1343 }
1344
1345 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1346 goto nla_put_failure;
1347
1348 if (rtnl_xdp_fill(skb, dev))
1349 goto nla_put_failure;
1350
1295 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1296 if (rtnl_link_fill(skb, dev) < 0)
1297 goto nla_put_failure;
1298 }
1299
1300 if (dev->rtnl_link_ops &&
1301 dev->rtnl_link_ops->get_link_net) {
1302 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

--- 69 unchanged lines hidden (view full) ---

1372 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1373 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1374 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1375 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1376 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1377 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1378 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1379 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1351 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1352 if (rtnl_link_fill(skb, dev) < 0)
1353 goto nla_put_failure;
1354 }
1355
1356 if (dev->rtnl_link_ops &&
1357 dev->rtnl_link_ops->get_link_net) {
1358 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

--- 69 unchanged lines hidden (view full) ---

1428 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1429 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1430 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1431 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1432 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1433 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1434 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1435 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1436 [IFLA_XDP] = { .type = NLA_NESTED },
1380};
1381
1382static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1383 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1384 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1385 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1386 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1387};

--- 21 unchanged lines hidden (view full) ---

1409 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1410 .len = PORT_UUID_MAX },
1411 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1412 .len = PORT_UUID_MAX },
1413 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1414 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1415};
1416
1437};
1438
1439static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1440 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1441 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1442 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1443 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1444};

--- 21 unchanged lines hidden (view full) ---

1466 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1467 .len = PORT_UUID_MAX },
1468 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1469 .len = PORT_UUID_MAX },
1470 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1471 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1472};
1473
/* Netlink policy for the IFLA_XDP nest: fd of the program to attach
 * (s32) and the read-only attached flag (u8).
 */
static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
};
1478
1417static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1418{
1419 const struct rtnl_link_ops *ops = NULL;
1420 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1421
1422 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, ifla_info_policy) < 0)
1423 return NULL;
1424

--- 482 unchanged lines hidden (view full) ---

1907 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
1908 if (err)
1909 goto errout;
1910 status |= DO_SETLINK_MODIFIED;
1911 }
1912
1913 if (tb[IFLA_TXQLEN]) {
1914 unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
1479static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1480{
1481 const struct rtnl_link_ops *ops = NULL;
1482 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1483
1484 if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, ifla_info_policy) < 0)
1485 return NULL;
1486

--- 482 unchanged lines hidden (view full) ---

1969 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
1970 if (err)
1971 goto errout;
1972 status |= DO_SETLINK_MODIFIED;
1973 }
1974
1975 if (tb[IFLA_TXQLEN]) {
1976 unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
1977 unsigned long orig_len = dev->tx_queue_len;
1915
1978
1916 if (dev->tx_queue_len ^ value)
1979 if (dev->tx_queue_len ^ value) {
1980 dev->tx_queue_len = value;
1981 err = call_netdevice_notifiers(
1982 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
1983 err = notifier_to_errno(err);
1984 if (err) {
1985 dev->tx_queue_len = orig_len;
1986 goto errout;
1987 }
1917 status |= DO_SETLINK_NOTIFY;
1988 status |= DO_SETLINK_NOTIFY;
1918
1919 dev->tx_queue_len = value;
1989 }
1920 }
1921
1922 if (tb[IFLA_OPERSTATE])
1923 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
1924
1925 if (tb[IFLA_LINKMODE]) {
1926 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
1927

--- 98 unchanged lines hidden (view full) ---

2026 if (tb[IFLA_PROTO_DOWN]) {
2027 err = dev_change_proto_down(dev,
2028 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2029 if (err)
2030 goto errout;
2031 status |= DO_SETLINK_NOTIFY;
2032 }
2033
1990 }
1991
1992 if (tb[IFLA_OPERSTATE])
1993 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
1994
1995 if (tb[IFLA_LINKMODE]) {
1996 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
1997

--- 98 unchanged lines hidden (view full) ---

2096 if (tb[IFLA_PROTO_DOWN]) {
2097 err = dev_change_proto_down(dev,
2098 nla_get_u8(tb[IFLA_PROTO_DOWN]));
2099 if (err)
2100 goto errout;
2101 status |= DO_SETLINK_NOTIFY;
2102 }
2103
2104 if (tb[IFLA_XDP]) {
2105 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2106
2107 err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2108 ifla_xdp_policy);
2109 if (err < 0)
2110 goto errout;
2111
2112 if (xdp[IFLA_XDP_FD]) {
2113 err = dev_change_xdp_fd(dev,
2114 nla_get_s32(xdp[IFLA_XDP_FD]));
2115 if (err)
2116 goto errout;
2117 status |= DO_SETLINK_NOTIFY;
2118 }
2119 }
2120
2034errout:
2035 if (status & DO_SETLINK_MODIFIED) {
2036 if (status & DO_SETLINK_NOTIFY)
2037 netdev_state_change(dev);
2038
2039 if (err < 0)
2040 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2041 dev->name);

--- 1406 unchanged lines hidden (view full) ---

3448 }
3449
3450 if (have_flags)
3451 memcpy(nla_data(attr), &flags, sizeof(flags));
3452out:
3453 return err;
3454}
3455
2121errout:
2122 if (status & DO_SETLINK_MODIFIED) {
2123 if (status & DO_SETLINK_NOTIFY)
2124 netdev_state_change(dev);
2125
2126 if (err < 0)
2127 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
2128 dev->name);

--- 1406 unchanged lines hidden (view full) ---

3535 }
3536
3537 if (have_flags)
3538 memcpy(nla_data(attr), &flags, sizeof(flags));
3539out:
3540 return err;
3541}
3542
3543static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
3544{
3545 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
3546 (!idxattr || idxattr == attrid);
3547}
3548
/* Fill one RTM_NEWSTATS message for @dev.
 *
 * @filter_mask selects which IFLA_STATS_* attributes to emit (see
 * stats_attr_valid()).  @idxattr and @prividx form the resume cursor
 * used by dumps: when a multi-part message runs out of room, the
 * partially filled message is kept and the cursor records which
 * attribute (and where inside it) the next call should continue from.
 */
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;	/* cursor on entry, for progress check */

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		/* 64-bit aligned reservation; IFLA_STATS_UNSPEC is the
		 * pad attribute type.
		 */
		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			int err;

			/* record in-progress attribute for the cursor */
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;	/* attribute completed */
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			int err;

			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}
3640
/* Policy for RTM_GETSTATS attributes: the 64-bit stats blob must be
 * exactly sizeof(struct rtnl_link_stats64).
 */
static const struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = {
	[IFLA_STATS_LINK_64]	= { .len = sizeof(struct rtnl_link_stats64) },
};
3644
3645static size_t if_nlmsg_stats_size(const struct net_device *dev,
3646 u32 filter_mask)
3647{
3648 size_t size = 0;
3649
3650 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
3651 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
3652
3653 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
3654 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3655 int attr = IFLA_STATS_LINK_XSTATS;
3656
3657 if (ops && ops->get_linkxstats_size) {
3658 size += nla_total_size(ops->get_linkxstats_size(dev,
3659 attr));
3660 /* for IFLA_STATS_LINK_XSTATS */
3661 size += nla_total_size(0);
3662 }
3663 }
3664
3665 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
3666 struct net_device *_dev = (struct net_device *)dev;
3667 const struct rtnl_link_ops *ops = NULL;
3668 const struct net_device *master;
3669
3670 /* netdev_master_upper_dev_get can't take const */
3671 master = netdev_master_upper_dev_get(_dev);
3672 if (master)
3673 ops = master->rtnl_link_ops;
3674 if (ops && ops->get_linkxstats_size) {
3675 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
3676
3677 size += nla_total_size(ops->get_linkxstats_size(dev,
3678 attr));
3679 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
3680 size += nla_total_size(0);
3681 }
3682 }
3683
3684 return size;
3685}
3686
3687static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
3688{
3689 struct net *net = sock_net(skb->sk);
3690 struct net_device *dev = NULL;
3691 int idxattr = 0, prividx = 0;
3692 struct if_stats_msg *ifsm;
3693 struct sk_buff *nskb;
3694 u32 filter_mask;
3695 int err;
3696
3697 ifsm = nlmsg_data(nlh);
3698 if (ifsm->ifindex > 0)
3699 dev = __dev_get_by_index(net, ifsm->ifindex);
3700 else
3701 return -EINVAL;
3702
3703 if (!dev)
3704 return -ENODEV;
3705
3706 filter_mask = ifsm->filter_mask;
3707 if (!filter_mask)
3708 return -EINVAL;
3709
3710 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
3711 if (!nskb)
3712 return -ENOBUFS;
3713
3714 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
3715 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
3716 0, filter_mask, &idxattr, &prividx);
3717 if (err < 0) {
3718 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
3719 WARN_ON(err == -EMSGSIZE);
3720 kfree_skb(nskb);
3721 } else {
3722 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3723 }
3724
3725 return err;
3726}
3727
/* Dump RTM_NEWSTATS messages for every netdevice in the namespace.
 *
 * Resume state lives in cb->args:
 *   [0] device hash bucket, [1] index within the bucket,
 *   [2] attribute cursor, [3] per-attribute private cursor
 * (the last two are consumed by rtnl_fill_statsinfo()).
 */
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	/* restore the cursor from the previous dump invocation */
	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;	/* skip already-dumped devices */
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;	/* cursor saved below */
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	/* persist the cursor for the next invocation */
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}
3784
3456/* Process one rtnetlink message. */
3457
3458static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
3459{
3460 struct net *net = sock_net(skb->sk);
3461 rtnl_doit_func doit;
3462 int kind;
3463 int family;

--- 133 unchanged lines hidden (view full) ---

3597
3598 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
3599 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
3600 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
3601
3602 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
3603 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
3604 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
3785/* Process one rtnetlink message. */
3786
3787static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
3788{
3789 struct net *net = sock_net(skb->sk);
3790 rtnl_doit_func doit;
3791 int kind;
3792 int family;

--- 133 unchanged lines hidden (view full) ---

3926
3927 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
3928 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
3929 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
3930
3931 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
3932 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
3933 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
3934
3935 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
3936 NULL);
3605}
3937}