12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * net-sysfs.c - network device class and attributes 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org> 61da177e4SLinus Torvalds */ 71da177e4SLinus Torvalds 84fc268d2SRandy Dunlap #include <linux/capability.h> 91da177e4SLinus Torvalds #include <linux/kernel.h> 101da177e4SLinus Torvalds #include <linux/netdevice.h> 111da177e4SLinus Torvalds #include <linux/if_arp.h> 125a0e3ad6STejun Heo #include <linux/slab.h> 13174cd4b1SIngo Molnar #include <linux/sched/signal.h> 1407bbecb3SAlex Belits #include <linux/sched/isolation.h> 15608b4b95SEric W. Biederman #include <linux/nsproxy.h> 161da177e4SLinus Torvalds #include <net/sock.h> 17608b4b95SEric W. Biederman #include <net/net_namespace.h> 181da177e4SLinus Torvalds #include <linux/rtnetlink.h> 19fec5e652STom Herbert #include <linux/vmalloc.h> 20bc3b2d7fSPaul Gortmaker #include <linux/export.h> 21114cf580STom Herbert #include <linux/jiffies.h> 229802c8e2SMing Lei #include <linux/pm_runtime.h> 23aa836df9SFlorian Fainelli #include <linux/of.h> 2488832a22SBen Dooks #include <linux/of_net.h> 254d99f660SAndrei Vagin #include <linux/cpu.h> 261da177e4SLinus Torvalds 27342709efSPavel Emelyanov #include "net-sysfs.h" 28342709efSPavel Emelyanov 298b41d188SEric W. 
Biederman #ifdef CONFIG_SYSFS 301da177e4SLinus Torvalds static const char fmt_hex[] = "%#x\n"; 311da177e4SLinus Torvalds static const char fmt_dec[] = "%d\n"; 321da177e4SLinus Torvalds static const char fmt_ulong[] = "%lu\n"; 33be1f3c2cSBen Hutchings static const char fmt_u64[] = "%llu\n"; 341da177e4SLinus Torvalds 351da177e4SLinus Torvalds static inline int dev_isalive(const struct net_device *dev) 361da177e4SLinus Torvalds { 37fe9925b5SStephen Hemminger return dev->reg_state <= NETREG_REGISTERED; 381da177e4SLinus Torvalds } 391da177e4SLinus Torvalds 401da177e4SLinus Torvalds /* use same locking rules as GIF* ioctl's */ 4143cb76d9SGreg Kroah-Hartman static ssize_t netdev_show(const struct device *dev, 4243cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf, 431da177e4SLinus Torvalds ssize_t (*format)(const struct net_device *, char *)) 441da177e4SLinus Torvalds { 456b53dafeSWANG Cong struct net_device *ndev = to_net_dev(dev); 461da177e4SLinus Torvalds ssize_t ret = -EINVAL; 471da177e4SLinus Torvalds 481da177e4SLinus Torvalds read_lock(&dev_base_lock); 496b53dafeSWANG Cong if (dev_isalive(ndev)) 506b53dafeSWANG Cong ret = (*format)(ndev, buf); 511da177e4SLinus Torvalds read_unlock(&dev_base_lock); 521da177e4SLinus Torvalds 531da177e4SLinus Torvalds return ret; 541da177e4SLinus Torvalds } 551da177e4SLinus Torvalds 561da177e4SLinus Torvalds /* generate a show function for simple field */ 571da177e4SLinus Torvalds #define NETDEVICE_SHOW(field, format_string) \ 586b53dafeSWANG Cong static ssize_t format_##field(const struct net_device *dev, char *buf) \ 591da177e4SLinus Torvalds { \ 606b53dafeSWANG Cong return sprintf(buf, format_string, dev->field); \ 611da177e4SLinus Torvalds } \ 626be8aeefSGreg Kroah-Hartman static ssize_t field##_show(struct device *dev, \ 6343cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf) \ 641da177e4SLinus Torvalds { \ 6543cb76d9SGreg Kroah-Hartman return netdev_show(dev, attr, buf, format_##field); \ 
666be8aeefSGreg Kroah-Hartman } \ 671da177e4SLinus Torvalds 686be8aeefSGreg Kroah-Hartman #define NETDEVICE_SHOW_RO(field, format_string) \ 696be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW(field, format_string); \ 706be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(field) 716be8aeefSGreg Kroah-Hartman 726be8aeefSGreg Kroah-Hartman #define NETDEVICE_SHOW_RW(field, format_string) \ 736be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW(field, format_string); \ 746be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RW(field) 751da177e4SLinus Torvalds 761da177e4SLinus Torvalds /* use same locking and permission rules as SIF* ioctl's */ 7743cb76d9SGreg Kroah-Hartman static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, 781da177e4SLinus Torvalds const char *buf, size_t len, 791da177e4SLinus Torvalds int (*set)(struct net_device *, unsigned long)) 801da177e4SLinus Torvalds { 815e1fccc0SEric W. Biederman struct net_device *netdev = to_net_dev(dev); 825e1fccc0SEric W. Biederman struct net *net = dev_net(netdev); 831da177e4SLinus Torvalds unsigned long new; 845f0224a6SColin Ian King int ret; 851da177e4SLinus Torvalds 865e1fccc0SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 871da177e4SLinus Torvalds return -EPERM; 881da177e4SLinus Torvalds 89e1e420c7SShuah Khan ret = kstrtoul(buf, 0, &new); 90e1e420c7SShuah Khan if (ret) 911da177e4SLinus Torvalds goto err; 921da177e4SLinus Torvalds 935a5990d3SStephen Hemminger if (!rtnl_trylock()) 94336ca57cSEric W. Biederman return restart_syscall(); 955a5990d3SStephen Hemminger 965e1fccc0SEric W. 
Biederman if (dev_isalive(netdev)) { 976648c65eSstephen hemminger ret = (*set)(netdev, new); 986648c65eSstephen hemminger if (ret == 0) 991da177e4SLinus Torvalds ret = len; 1001da177e4SLinus Torvalds } 1011da177e4SLinus Torvalds rtnl_unlock(); 1021da177e4SLinus Torvalds err: 1031da177e4SLinus Torvalds return ret; 1041da177e4SLinus Torvalds } 1051da177e4SLinus Torvalds 1066be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RO(dev_id, fmt_hex); 1073f85944fSAmir Vadai NETDEVICE_SHOW_RO(dev_port, fmt_dec); 1086be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); 1096be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RO(addr_len, fmt_dec); 1106be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RO(ifindex, fmt_dec); 1116be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RO(type, fmt_dec); 1126be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RO(link_mode, fmt_dec); 1131da177e4SLinus Torvalds 114a54acb3aSNicolas Dichtel static ssize_t iflink_show(struct device *dev, struct device_attribute *attr, 115a54acb3aSNicolas Dichtel char *buf) 116a54acb3aSNicolas Dichtel { 117a54acb3aSNicolas Dichtel struct net_device *ndev = to_net_dev(dev); 118a54acb3aSNicolas Dichtel 119a54acb3aSNicolas Dichtel return sprintf(buf, fmt_dec, dev_get_iflink(ndev)); 120a54acb3aSNicolas Dichtel } 121a54acb3aSNicolas Dichtel static DEVICE_ATTR_RO(iflink); 122a54acb3aSNicolas Dichtel 1236b53dafeSWANG Cong static ssize_t format_name_assign_type(const struct net_device *dev, char *buf) 124685343fcSTom Gundersen { 1256b53dafeSWANG Cong return sprintf(buf, fmt_dec, dev->name_assign_type); 126685343fcSTom Gundersen } 127685343fcSTom Gundersen 128685343fcSTom Gundersen static ssize_t name_assign_type_show(struct device *dev, 129685343fcSTom Gundersen struct device_attribute *attr, 130685343fcSTom Gundersen char *buf) 131685343fcSTom Gundersen { 1326b53dafeSWANG Cong struct net_device *ndev = to_net_dev(dev); 133685343fcSTom Gundersen ssize_t ret = -EINVAL; 134685343fcSTom Gundersen 1356b53dafeSWANG Cong if (ndev->name_assign_type 
!= NET_NAME_UNKNOWN) 136685343fcSTom Gundersen ret = netdev_show(dev, attr, buf, format_name_assign_type); 137685343fcSTom Gundersen 138685343fcSTom Gundersen return ret; 139685343fcSTom Gundersen } 140685343fcSTom Gundersen static DEVICE_ATTR_RO(name_assign_type); 141685343fcSTom Gundersen 1421da177e4SLinus Torvalds /* use same locking rules as GIFHWADDR ioctl's */ 1436be8aeefSGreg Kroah-Hartman static ssize_t address_show(struct device *dev, struct device_attribute *attr, 14443cb76d9SGreg Kroah-Hartman char *buf) 1451da177e4SLinus Torvalds { 1466b53dafeSWANG Cong struct net_device *ndev = to_net_dev(dev); 1471da177e4SLinus Torvalds ssize_t ret = -EINVAL; 1481da177e4SLinus Torvalds 1491da177e4SLinus Torvalds read_lock(&dev_base_lock); 1506b53dafeSWANG Cong if (dev_isalive(ndev)) 1516b53dafeSWANG Cong ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len); 1521da177e4SLinus Torvalds read_unlock(&dev_base_lock); 1531da177e4SLinus Torvalds return ret; 1541da177e4SLinus Torvalds } 1556be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(address); 1561da177e4SLinus Torvalds 1576be8aeefSGreg Kroah-Hartman static ssize_t broadcast_show(struct device *dev, 15843cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf) 1591da177e4SLinus Torvalds { 1606b53dafeSWANG Cong struct net_device *ndev = to_net_dev(dev); 1616648c65eSstephen hemminger 1626b53dafeSWANG Cong if (dev_isalive(ndev)) 1636b53dafeSWANG Cong return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len); 1641da177e4SLinus Torvalds return -EINVAL; 1651da177e4SLinus Torvalds } 1666be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(broadcast); 1671da177e4SLinus Torvalds 1686b53dafeSWANG Cong static int change_carrier(struct net_device *dev, unsigned long new_carrier) 169fdae0fdeSJiri Pirko { 1706b53dafeSWANG Cong if (!netif_running(dev)) 171fdae0fdeSJiri Pirko return -EINVAL; 1726b53dafeSWANG Cong return dev_change_carrier(dev, (bool)new_carrier); 173fdae0fdeSJiri Pirko } 174fdae0fdeSJiri Pirko 
1756be8aeefSGreg Kroah-Hartman static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, 176fdae0fdeSJiri Pirko const char *buf, size_t len) 177fdae0fdeSJiri Pirko { 178fdae0fdeSJiri Pirko return netdev_store(dev, attr, buf, len, change_carrier); 179fdae0fdeSJiri Pirko } 180fdae0fdeSJiri Pirko 1816be8aeefSGreg Kroah-Hartman static ssize_t carrier_show(struct device *dev, 18243cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf) 1831da177e4SLinus Torvalds { 1841da177e4SLinus Torvalds struct net_device *netdev = to_net_dev(dev); 1856648c65eSstephen hemminger 1866648c65eSstephen hemminger if (netif_running(netdev)) 1871da177e4SLinus Torvalds return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev)); 1886648c65eSstephen hemminger 1891da177e4SLinus Torvalds return -EINVAL; 1901da177e4SLinus Torvalds } 1916be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RW(carrier); 1921da177e4SLinus Torvalds 1936be8aeefSGreg Kroah-Hartman static ssize_t speed_show(struct device *dev, 194d519e17eSAndy Gospodarek struct device_attribute *attr, char *buf) 195d519e17eSAndy Gospodarek { 196d519e17eSAndy Gospodarek struct net_device *netdev = to_net_dev(dev); 197d519e17eSAndy Gospodarek int ret = -EINVAL; 198d519e17eSAndy Gospodarek 199d519e17eSAndy Gospodarek if (!rtnl_trylock()) 200d519e17eSAndy Gospodarek return restart_syscall(); 201d519e17eSAndy Gospodarek 2028ae6dacaSDavid Decotigny if (netif_running(netdev)) { 2037cad1bacSDavid Decotigny struct ethtool_link_ksettings cmd; 2047cad1bacSDavid Decotigny 2057cad1bacSDavid Decotigny if (!__ethtool_get_link_ksettings(netdev, &cmd)) 2067cad1bacSDavid Decotigny ret = sprintf(buf, fmt_dec, cmd.base.speed); 207d519e17eSAndy Gospodarek } 208d519e17eSAndy Gospodarek rtnl_unlock(); 209d519e17eSAndy Gospodarek return ret; 210d519e17eSAndy Gospodarek } 2116be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(speed); 212d519e17eSAndy Gospodarek 2136be8aeefSGreg Kroah-Hartman static ssize_t duplex_show(struct device *dev, 
214d519e17eSAndy Gospodarek struct device_attribute *attr, char *buf) 215d519e17eSAndy Gospodarek { 216d519e17eSAndy Gospodarek struct net_device *netdev = to_net_dev(dev); 217d519e17eSAndy Gospodarek int ret = -EINVAL; 218d519e17eSAndy Gospodarek 219d519e17eSAndy Gospodarek if (!rtnl_trylock()) 220d519e17eSAndy Gospodarek return restart_syscall(); 221d519e17eSAndy Gospodarek 2228ae6dacaSDavid Decotigny if (netif_running(netdev)) { 2237cad1bacSDavid Decotigny struct ethtool_link_ksettings cmd; 2247cad1bacSDavid Decotigny 2257cad1bacSDavid Decotigny if (!__ethtool_get_link_ksettings(netdev, &cmd)) { 226c6c13965SNikolay Aleksandrov const char *duplex; 2277cad1bacSDavid Decotigny 2287cad1bacSDavid Decotigny switch (cmd.base.duplex) { 229c6c13965SNikolay Aleksandrov case DUPLEX_HALF: 230c6c13965SNikolay Aleksandrov duplex = "half"; 231c6c13965SNikolay Aleksandrov break; 232c6c13965SNikolay Aleksandrov case DUPLEX_FULL: 233c6c13965SNikolay Aleksandrov duplex = "full"; 234c6c13965SNikolay Aleksandrov break; 235c6c13965SNikolay Aleksandrov default: 236c6c13965SNikolay Aleksandrov duplex = "unknown"; 237c6c13965SNikolay Aleksandrov break; 238c6c13965SNikolay Aleksandrov } 239c6c13965SNikolay Aleksandrov ret = sprintf(buf, "%s\n", duplex); 240c6c13965SNikolay Aleksandrov } 241d519e17eSAndy Gospodarek } 242d519e17eSAndy Gospodarek rtnl_unlock(); 243d519e17eSAndy Gospodarek return ret; 244d519e17eSAndy Gospodarek } 2456be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(duplex); 246d519e17eSAndy Gospodarek 247db30a577SAndrew Lunn static ssize_t testing_show(struct device *dev, 248db30a577SAndrew Lunn struct device_attribute *attr, char *buf) 249db30a577SAndrew Lunn { 250db30a577SAndrew Lunn struct net_device *netdev = to_net_dev(dev); 251db30a577SAndrew Lunn 252db30a577SAndrew Lunn if (netif_running(netdev)) 253db30a577SAndrew Lunn return sprintf(buf, fmt_dec, !!netif_testing(netdev)); 254db30a577SAndrew Lunn 255db30a577SAndrew Lunn return -EINVAL; 256db30a577SAndrew Lunn } 
257db30a577SAndrew Lunn static DEVICE_ATTR_RO(testing); 258db30a577SAndrew Lunn 2596be8aeefSGreg Kroah-Hartman static ssize_t dormant_show(struct device *dev, 26043cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf) 261b00055aaSStefan Rompf { 262b00055aaSStefan Rompf struct net_device *netdev = to_net_dev(dev); 263b00055aaSStefan Rompf 264b00055aaSStefan Rompf if (netif_running(netdev)) 265b00055aaSStefan Rompf return sprintf(buf, fmt_dec, !!netif_dormant(netdev)); 266b00055aaSStefan Rompf 267b00055aaSStefan Rompf return -EINVAL; 268b00055aaSStefan Rompf } 2696be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(dormant); 270b00055aaSStefan Rompf 27136cbd3dcSJan Engelhardt static const char *const operstates[] = { 272b00055aaSStefan Rompf "unknown", 273b00055aaSStefan Rompf "notpresent", /* currently unused */ 274b00055aaSStefan Rompf "down", 275b00055aaSStefan Rompf "lowerlayerdown", 276db30a577SAndrew Lunn "testing", 277b00055aaSStefan Rompf "dormant", 278b00055aaSStefan Rompf "up" 279b00055aaSStefan Rompf }; 280b00055aaSStefan Rompf 2816be8aeefSGreg Kroah-Hartman static ssize_t operstate_show(struct device *dev, 28243cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf) 283b00055aaSStefan Rompf { 284b00055aaSStefan Rompf const struct net_device *netdev = to_net_dev(dev); 285b00055aaSStefan Rompf unsigned char operstate; 286b00055aaSStefan Rompf 287b00055aaSStefan Rompf read_lock(&dev_base_lock); 288b00055aaSStefan Rompf operstate = netdev->operstate; 289b00055aaSStefan Rompf if (!netif_running(netdev)) 290b00055aaSStefan Rompf operstate = IF_OPER_DOWN; 291b00055aaSStefan Rompf read_unlock(&dev_base_lock); 292b00055aaSStefan Rompf 293e3a5cd9eSAdrian Bunk if (operstate >= ARRAY_SIZE(operstates)) 294b00055aaSStefan Rompf return -EINVAL; /* should not happen */ 295b00055aaSStefan Rompf 296b00055aaSStefan Rompf return sprintf(buf, "%s\n", operstates[operstate]); 297b00055aaSStefan Rompf } 2986be8aeefSGreg Kroah-Hartman static 
DEVICE_ATTR_RO(operstate); 299b00055aaSStefan Rompf 3002d3b479dSdavid decotigny static ssize_t carrier_changes_show(struct device *dev, 3012d3b479dSdavid decotigny struct device_attribute *attr, 3022d3b479dSdavid decotigny char *buf) 3032d3b479dSdavid decotigny { 3042d3b479dSdavid decotigny struct net_device *netdev = to_net_dev(dev); 3056648c65eSstephen hemminger 3062d3b479dSdavid decotigny return sprintf(buf, fmt_dec, 307b2d3bcfaSDavid Decotigny atomic_read(&netdev->carrier_up_count) + 308b2d3bcfaSDavid Decotigny atomic_read(&netdev->carrier_down_count)); 3092d3b479dSdavid decotigny } 3102d3b479dSdavid decotigny static DEVICE_ATTR_RO(carrier_changes); 3112d3b479dSdavid decotigny 312b2d3bcfaSDavid Decotigny static ssize_t carrier_up_count_show(struct device *dev, 313b2d3bcfaSDavid Decotigny struct device_attribute *attr, 314b2d3bcfaSDavid Decotigny char *buf) 315b2d3bcfaSDavid Decotigny { 316b2d3bcfaSDavid Decotigny struct net_device *netdev = to_net_dev(dev); 317b2d3bcfaSDavid Decotigny 318b2d3bcfaSDavid Decotigny return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count)); 319b2d3bcfaSDavid Decotigny } 320b2d3bcfaSDavid Decotigny static DEVICE_ATTR_RO(carrier_up_count); 321b2d3bcfaSDavid Decotigny 322b2d3bcfaSDavid Decotigny static ssize_t carrier_down_count_show(struct device *dev, 323b2d3bcfaSDavid Decotigny struct device_attribute *attr, 324b2d3bcfaSDavid Decotigny char *buf) 325b2d3bcfaSDavid Decotigny { 326b2d3bcfaSDavid Decotigny struct net_device *netdev = to_net_dev(dev); 327b2d3bcfaSDavid Decotigny 328b2d3bcfaSDavid Decotigny return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count)); 329b2d3bcfaSDavid Decotigny } 330b2d3bcfaSDavid Decotigny static DEVICE_ATTR_RO(carrier_down_count); 331b2d3bcfaSDavid Decotigny 3321da177e4SLinus Torvalds /* read-write attributes */ 3331da177e4SLinus Torvalds 3346b53dafeSWANG Cong static int change_mtu(struct net_device *dev, unsigned long new_mtu) 3351da177e4SLinus Torvalds { 3366b53dafeSWANG Cong 
return dev_set_mtu(dev, (int)new_mtu); 3371da177e4SLinus Torvalds } 3381da177e4SLinus Torvalds 3396be8aeefSGreg Kroah-Hartman static ssize_t mtu_store(struct device *dev, struct device_attribute *attr, 34043cb76d9SGreg Kroah-Hartman const char *buf, size_t len) 3411da177e4SLinus Torvalds { 34243cb76d9SGreg Kroah-Hartman return netdev_store(dev, attr, buf, len, change_mtu); 3431da177e4SLinus Torvalds } 3446be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RW(mtu, fmt_dec); 3451da177e4SLinus Torvalds 3466b53dafeSWANG Cong static int change_flags(struct net_device *dev, unsigned long new_flags) 3471da177e4SLinus Torvalds { 348567c5e13SPetr Machata return dev_change_flags(dev, (unsigned int)new_flags, NULL); 3491da177e4SLinus Torvalds } 3501da177e4SLinus Torvalds 3516be8aeefSGreg Kroah-Hartman static ssize_t flags_store(struct device *dev, struct device_attribute *attr, 35243cb76d9SGreg Kroah-Hartman const char *buf, size_t len) 3531da177e4SLinus Torvalds { 35443cb76d9SGreg Kroah-Hartman return netdev_store(dev, attr, buf, len, change_flags); 3551da177e4SLinus Torvalds } 3566be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW_RW(flags, fmt_hex); 3571da177e4SLinus Torvalds 3586be8aeefSGreg Kroah-Hartman static ssize_t tx_queue_len_store(struct device *dev, 35943cb76d9SGreg Kroah-Hartman struct device_attribute *attr, 36043cb76d9SGreg Kroah-Hartman const char *buf, size_t len) 3611da177e4SLinus Torvalds { 3625e1fccc0SEric W. Biederman if (!capable(CAP_NET_ADMIN)) 3635e1fccc0SEric W. Biederman return -EPERM; 3645e1fccc0SEric W. 
Biederman 3656a643ddbSCong Wang return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len); 3661da177e4SLinus Torvalds } 3670cd29503SAlexey Dobriyan NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec); 3681da177e4SLinus Torvalds 3693b47d303SEric Dumazet static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) 3703b47d303SEric Dumazet { 3717e417a66SEric Dumazet WRITE_ONCE(dev->gro_flush_timeout, val); 3723b47d303SEric Dumazet return 0; 3733b47d303SEric Dumazet } 3743b47d303SEric Dumazet 3753b47d303SEric Dumazet static ssize_t gro_flush_timeout_store(struct device *dev, 3763b47d303SEric Dumazet struct device_attribute *attr, 3773b47d303SEric Dumazet const char *buf, size_t len) 3783b47d303SEric Dumazet { 3793b47d303SEric Dumazet if (!capable(CAP_NET_ADMIN)) 3803b47d303SEric Dumazet return -EPERM; 3813b47d303SEric Dumazet 3823b47d303SEric Dumazet return netdev_store(dev, attr, buf, len, change_gro_flush_timeout); 3833b47d303SEric Dumazet } 3843b47d303SEric Dumazet NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); 3853b47d303SEric Dumazet 3866f8b12d6SEric Dumazet static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val) 3876f8b12d6SEric Dumazet { 3887e417a66SEric Dumazet WRITE_ONCE(dev->napi_defer_hard_irqs, val); 3896f8b12d6SEric Dumazet return 0; 3906f8b12d6SEric Dumazet } 3916f8b12d6SEric Dumazet 3926f8b12d6SEric Dumazet static ssize_t napi_defer_hard_irqs_store(struct device *dev, 3936f8b12d6SEric Dumazet struct device_attribute *attr, 3946f8b12d6SEric Dumazet const char *buf, size_t len) 3956f8b12d6SEric Dumazet { 3966f8b12d6SEric Dumazet if (!capable(CAP_NET_ADMIN)) 3976f8b12d6SEric Dumazet return -EPERM; 3986f8b12d6SEric Dumazet 3996f8b12d6SEric Dumazet return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs); 4006f8b12d6SEric Dumazet } 4016f8b12d6SEric Dumazet NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec); 4026f8b12d6SEric Dumazet 4036be8aeefSGreg Kroah-Hartman static ssize_t ifalias_store(struct device 
*dev, struct device_attribute *attr, 4040b815a1aSStephen Hemminger const char *buf, size_t len) 4050b815a1aSStephen Hemminger { 4060b815a1aSStephen Hemminger struct net_device *netdev = to_net_dev(dev); 4075e1fccc0SEric W. Biederman struct net *net = dev_net(netdev); 4080b815a1aSStephen Hemminger size_t count = len; 409c92eb77aSRoopa Prabhu ssize_t ret = 0; 4100b815a1aSStephen Hemminger 4115e1fccc0SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 4120b815a1aSStephen Hemminger return -EPERM; 4130b815a1aSStephen Hemminger 4140b815a1aSStephen Hemminger /* ignore trailing newline */ 4150b815a1aSStephen Hemminger if (len > 0 && buf[len - 1] == '\n') 4160b815a1aSStephen Hemminger --count; 4170b815a1aSStephen Hemminger 418c92eb77aSRoopa Prabhu if (!rtnl_trylock()) 419c92eb77aSRoopa Prabhu return restart_syscall(); 4200b815a1aSStephen Hemminger 421c92eb77aSRoopa Prabhu if (dev_isalive(netdev)) { 422c92eb77aSRoopa Prabhu ret = dev_set_alias(netdev, buf, count); 423c92eb77aSRoopa Prabhu if (ret < 0) 424c92eb77aSRoopa Prabhu goto err; 425c92eb77aSRoopa Prabhu ret = len; 426c92eb77aSRoopa Prabhu netdev_state_change(netdev); 427c92eb77aSRoopa Prabhu } 428c92eb77aSRoopa Prabhu err: 429c92eb77aSRoopa Prabhu rtnl_unlock(); 430c92eb77aSRoopa Prabhu 431c92eb77aSRoopa Prabhu return ret; 4320b815a1aSStephen Hemminger } 4330b815a1aSStephen Hemminger 4346be8aeefSGreg Kroah-Hartman static ssize_t ifalias_show(struct device *dev, 4350b815a1aSStephen Hemminger struct device_attribute *attr, char *buf) 4360b815a1aSStephen Hemminger { 4370b815a1aSStephen Hemminger const struct net_device *netdev = to_net_dev(dev); 4386c557001SFlorian Westphal char tmp[IFALIASZ]; 4390b815a1aSStephen Hemminger ssize_t ret = 0; 4400b815a1aSStephen Hemminger 4416c557001SFlorian Westphal ret = dev_get_alias(netdev, tmp, sizeof(tmp)); 4426c557001SFlorian Westphal if (ret > 0) 4436c557001SFlorian Westphal ret = sprintf(buf, "%s\n", tmp); 4440b815a1aSStephen Hemminger return ret; 4450b815a1aSStephen 
Hemminger } 4466be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RW(ifalias); 447a512b92bSVlad Dogaru 4486b53dafeSWANG Cong static int change_group(struct net_device *dev, unsigned long new_group) 449a512b92bSVlad Dogaru { 4506b53dafeSWANG Cong dev_set_group(dev, (int)new_group); 451a512b92bSVlad Dogaru return 0; 452a512b92bSVlad Dogaru } 453a512b92bSVlad Dogaru 4546be8aeefSGreg Kroah-Hartman static ssize_t group_store(struct device *dev, struct device_attribute *attr, 455a512b92bSVlad Dogaru const char *buf, size_t len) 456a512b92bSVlad Dogaru { 457a512b92bSVlad Dogaru return netdev_store(dev, attr, buf, len, change_group); 458a512b92bSVlad Dogaru } 4596be8aeefSGreg Kroah-Hartman NETDEVICE_SHOW(group, fmt_dec); 460d6444062SJoe Perches static DEVICE_ATTR(netdev_group, 0644, group_show, group_store); 461a512b92bSVlad Dogaru 462d746d707SAnuradha Karuppiah static int change_proto_down(struct net_device *dev, unsigned long proto_down) 463d746d707SAnuradha Karuppiah { 464d746d707SAnuradha Karuppiah return dev_change_proto_down(dev, (bool)proto_down); 465d746d707SAnuradha Karuppiah } 466d746d707SAnuradha Karuppiah 467d746d707SAnuradha Karuppiah static ssize_t proto_down_store(struct device *dev, 468d746d707SAnuradha Karuppiah struct device_attribute *attr, 469d746d707SAnuradha Karuppiah const char *buf, size_t len) 470d746d707SAnuradha Karuppiah { 471d746d707SAnuradha Karuppiah return netdev_store(dev, attr, buf, len, change_proto_down); 472d746d707SAnuradha Karuppiah } 473d746d707SAnuradha Karuppiah NETDEVICE_SHOW_RW(proto_down, fmt_dec); 474d746d707SAnuradha Karuppiah 475cc998ff8SLinus Torvalds static ssize_t phys_port_id_show(struct device *dev, 476ff80e519SJiri Pirko struct device_attribute *attr, char *buf) 477ff80e519SJiri Pirko { 478ff80e519SJiri Pirko struct net_device *netdev = to_net_dev(dev); 479ff80e519SJiri Pirko ssize_t ret = -EINVAL; 480ff80e519SJiri Pirko 481ff80e519SJiri Pirko if (!rtnl_trylock()) 482ff80e519SJiri Pirko return restart_syscall(); 
483ff80e519SJiri Pirko 484ff80e519SJiri Pirko if (dev_isalive(netdev)) { 48502637fceSJiri Pirko struct netdev_phys_item_id ppid; 486ff80e519SJiri Pirko 487ff80e519SJiri Pirko ret = dev_get_phys_port_id(netdev, &ppid); 488ff80e519SJiri Pirko if (!ret) 489ff80e519SJiri Pirko ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id); 490ff80e519SJiri Pirko } 491ff80e519SJiri Pirko rtnl_unlock(); 492ff80e519SJiri Pirko 493ff80e519SJiri Pirko return ret; 494ff80e519SJiri Pirko } 495cc998ff8SLinus Torvalds static DEVICE_ATTR_RO(phys_port_id); 496ff80e519SJiri Pirko 497db24a904SDavid Ahern static ssize_t phys_port_name_show(struct device *dev, 498db24a904SDavid Ahern struct device_attribute *attr, char *buf) 499db24a904SDavid Ahern { 500db24a904SDavid Ahern struct net_device *netdev = to_net_dev(dev); 501db24a904SDavid Ahern ssize_t ret = -EINVAL; 502db24a904SDavid Ahern 503db24a904SDavid Ahern if (!rtnl_trylock()) 504db24a904SDavid Ahern return restart_syscall(); 505db24a904SDavid Ahern 506db24a904SDavid Ahern if (dev_isalive(netdev)) { 507db24a904SDavid Ahern char name[IFNAMSIZ]; 508db24a904SDavid Ahern 509db24a904SDavid Ahern ret = dev_get_phys_port_name(netdev, name, sizeof(name)); 510db24a904SDavid Ahern if (!ret) 511db24a904SDavid Ahern ret = sprintf(buf, "%s\n", name); 512db24a904SDavid Ahern } 513db24a904SDavid Ahern rtnl_unlock(); 514db24a904SDavid Ahern 515db24a904SDavid Ahern return ret; 516db24a904SDavid Ahern } 517db24a904SDavid Ahern static DEVICE_ATTR_RO(phys_port_name); 518db24a904SDavid Ahern 519aecbe01eSJiri Pirko static ssize_t phys_switch_id_show(struct device *dev, 520aecbe01eSJiri Pirko struct device_attribute *attr, char *buf) 521aecbe01eSJiri Pirko { 522aecbe01eSJiri Pirko struct net_device *netdev = to_net_dev(dev); 523aecbe01eSJiri Pirko ssize_t ret = -EINVAL; 524aecbe01eSJiri Pirko 525aecbe01eSJiri Pirko if (!rtnl_trylock()) 526aecbe01eSJiri Pirko return restart_syscall(); 527aecbe01eSJiri Pirko 528aecbe01eSJiri Pirko if (dev_isalive(netdev)) { 
529bccb3025SFlorian Fainelli struct netdev_phys_item_id ppid = { }; 530aecbe01eSJiri Pirko 531bccb3025SFlorian Fainelli ret = dev_get_port_parent_id(netdev, &ppid, false); 532aecbe01eSJiri Pirko if (!ret) 533bccb3025SFlorian Fainelli ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id); 534aecbe01eSJiri Pirko } 535aecbe01eSJiri Pirko rtnl_unlock(); 536aecbe01eSJiri Pirko 537aecbe01eSJiri Pirko return ret; 538aecbe01eSJiri Pirko } 539aecbe01eSJiri Pirko static DEVICE_ATTR_RO(phys_switch_id); 540aecbe01eSJiri Pirko 5415fdd2f0eSWei Wang static ssize_t threaded_show(struct device *dev, 5425fdd2f0eSWei Wang struct device_attribute *attr, char *buf) 5435fdd2f0eSWei Wang { 5445fdd2f0eSWei Wang struct net_device *netdev = to_net_dev(dev); 5455fdd2f0eSWei Wang ssize_t ret = -EINVAL; 5465fdd2f0eSWei Wang 5475fdd2f0eSWei Wang if (!rtnl_trylock()) 5485fdd2f0eSWei Wang return restart_syscall(); 5495fdd2f0eSWei Wang 5505fdd2f0eSWei Wang if (dev_isalive(netdev)) 5515fdd2f0eSWei Wang ret = sprintf(buf, fmt_dec, netdev->threaded); 5525fdd2f0eSWei Wang 5535fdd2f0eSWei Wang rtnl_unlock(); 5545fdd2f0eSWei Wang return ret; 5555fdd2f0eSWei Wang } 5565fdd2f0eSWei Wang 5575fdd2f0eSWei Wang static int modify_napi_threaded(struct net_device *dev, unsigned long val) 5585fdd2f0eSWei Wang { 5595fdd2f0eSWei Wang int ret; 5605fdd2f0eSWei Wang 5615fdd2f0eSWei Wang if (list_empty(&dev->napi_list)) 5625fdd2f0eSWei Wang return -EOPNOTSUPP; 5635fdd2f0eSWei Wang 5645fdd2f0eSWei Wang if (val != 0 && val != 1) 5655fdd2f0eSWei Wang return -EOPNOTSUPP; 5665fdd2f0eSWei Wang 5675fdd2f0eSWei Wang ret = dev_set_threaded(dev, val); 5685fdd2f0eSWei Wang 5695fdd2f0eSWei Wang return ret; 5705fdd2f0eSWei Wang } 5715fdd2f0eSWei Wang 5725fdd2f0eSWei Wang static ssize_t threaded_store(struct device *dev, 5735fdd2f0eSWei Wang struct device_attribute *attr, 5745fdd2f0eSWei Wang const char *buf, size_t len) 5755fdd2f0eSWei Wang { 5765fdd2f0eSWei Wang return netdev_store(dev, attr, buf, len, modify_napi_threaded); 
5775fdd2f0eSWei Wang } 5785fdd2f0eSWei Wang static DEVICE_ATTR_RW(threaded); 5795fdd2f0eSWei Wang 580ec6cc599Sstephen hemminger static struct attribute *net_class_attrs[] __ro_after_init = { 5816be8aeefSGreg Kroah-Hartman &dev_attr_netdev_group.attr, 5826be8aeefSGreg Kroah-Hartman &dev_attr_type.attr, 5836be8aeefSGreg Kroah-Hartman &dev_attr_dev_id.attr, 5843f85944fSAmir Vadai &dev_attr_dev_port.attr, 5856be8aeefSGreg Kroah-Hartman &dev_attr_iflink.attr, 5866be8aeefSGreg Kroah-Hartman &dev_attr_ifindex.attr, 587685343fcSTom Gundersen &dev_attr_name_assign_type.attr, 5886be8aeefSGreg Kroah-Hartman &dev_attr_addr_assign_type.attr, 5896be8aeefSGreg Kroah-Hartman &dev_attr_addr_len.attr, 5906be8aeefSGreg Kroah-Hartman &dev_attr_link_mode.attr, 5916be8aeefSGreg Kroah-Hartman &dev_attr_address.attr, 5926be8aeefSGreg Kroah-Hartman &dev_attr_broadcast.attr, 5936be8aeefSGreg Kroah-Hartman &dev_attr_speed.attr, 5946be8aeefSGreg Kroah-Hartman &dev_attr_duplex.attr, 5956be8aeefSGreg Kroah-Hartman &dev_attr_dormant.attr, 596db30a577SAndrew Lunn &dev_attr_testing.attr, 5976be8aeefSGreg Kroah-Hartman &dev_attr_operstate.attr, 5982d3b479dSdavid decotigny &dev_attr_carrier_changes.attr, 5996be8aeefSGreg Kroah-Hartman &dev_attr_ifalias.attr, 6006be8aeefSGreg Kroah-Hartman &dev_attr_carrier.attr, 6016be8aeefSGreg Kroah-Hartman &dev_attr_mtu.attr, 6026be8aeefSGreg Kroah-Hartman &dev_attr_flags.attr, 6036be8aeefSGreg Kroah-Hartman &dev_attr_tx_queue_len.attr, 6043b47d303SEric Dumazet &dev_attr_gro_flush_timeout.attr, 6056f8b12d6SEric Dumazet &dev_attr_napi_defer_hard_irqs.attr, 606cc998ff8SLinus Torvalds &dev_attr_phys_port_id.attr, 607db24a904SDavid Ahern &dev_attr_phys_port_name.attr, 608aecbe01eSJiri Pirko &dev_attr_phys_switch_id.attr, 609d746d707SAnuradha Karuppiah &dev_attr_proto_down.attr, 610b2d3bcfaSDavid Decotigny &dev_attr_carrier_up_count.attr, 611b2d3bcfaSDavid Decotigny &dev_attr_carrier_down_count.attr, 6125fdd2f0eSWei Wang &dev_attr_threaded.attr, 6136be8aeefSGreg 
Kroah-Hartman NULL, 6141da177e4SLinus Torvalds }; 6156be8aeefSGreg Kroah-Hartman ATTRIBUTE_GROUPS(net_class); 6161da177e4SLinus Torvalds 6171da177e4SLinus Torvalds /* Show a given an attribute in the statistics group */ 61843cb76d9SGreg Kroah-Hartman static ssize_t netstat_show(const struct device *d, 61943cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf, 6201da177e4SLinus Torvalds unsigned long offset) 6211da177e4SLinus Torvalds { 62243cb76d9SGreg Kroah-Hartman struct net_device *dev = to_net_dev(d); 6231da177e4SLinus Torvalds ssize_t ret = -EINVAL; 6241da177e4SLinus Torvalds 625be1f3c2cSBen Hutchings WARN_ON(offset > sizeof(struct rtnl_link_stats64) || 626be1f3c2cSBen Hutchings offset % sizeof(u64) != 0); 6271da177e4SLinus Torvalds 6281da177e4SLinus Torvalds read_lock(&dev_base_lock); 62996e74088SPavel Emelyanov if (dev_isalive(dev)) { 63028172739SEric Dumazet struct rtnl_link_stats64 temp; 63128172739SEric Dumazet const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); 63228172739SEric Dumazet 633be1f3c2cSBen Hutchings ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset)); 63496e74088SPavel Emelyanov } 6351da177e4SLinus Torvalds read_unlock(&dev_base_lock); 6361da177e4SLinus Torvalds return ret; 6371da177e4SLinus Torvalds } 6381da177e4SLinus Torvalds 6391da177e4SLinus Torvalds /* generate a read-only statistics attribute */ 6401da177e4SLinus Torvalds #define NETSTAT_ENTRY(name) \ 6416be8aeefSGreg Kroah-Hartman static ssize_t name##_show(struct device *d, \ 64243cb76d9SGreg Kroah-Hartman struct device_attribute *attr, char *buf) \ 6431da177e4SLinus Torvalds { \ 64443cb76d9SGreg Kroah-Hartman return netstat_show(d, attr, buf, \ 645be1f3c2cSBen Hutchings offsetof(struct rtnl_link_stats64, name)); \ 6461da177e4SLinus Torvalds } \ 6476be8aeefSGreg Kroah-Hartman static DEVICE_ATTR_RO(name) 6481da177e4SLinus Torvalds 6491da177e4SLinus Torvalds NETSTAT_ENTRY(rx_packets); 6501da177e4SLinus Torvalds NETSTAT_ENTRY(tx_packets); 
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

/* Every attribute generated by NETSTAT_ENTRY above, collected into the
 * "statistics" group below.  One entry per rtnl_link_stats64 counter.
 */
static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

/* Appears as the /sys/class/net/<iface>/statistics/ directory. */
static const struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
/* Intentionally empty: the group only reserves the "wireless" directory
 * name; its contents are populated elsewhere when wireless is in use.
 */
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name  = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
/* Without sysfs there are no class groups to register. */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
/* Convert an embedded struct attribute / kobject back to the rx-queue
 * wrappers they live inside.
 */
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

/* sysfs_ops .show dispatcher: forward to the rx_queue_attribute's show(). */
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

/* sysfs_ops .store dispatcher: forward to the rx_queue_attribute's store(). */
static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
/* Print the queue's RPS CPU mask ("rps_cpus") as a bitmap string.
 * The map is read under rcu_read_lock(); an absent map prints as an
 * empty mask.  Fails with -EINVAL only if the mask would not fit in
 * one page.
 */
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

/* Parse and install a new RPS CPU mask for the queue.
 *
 * Requires CAP_NET_ADMIN.  A non-empty mask is restricted to the
 * housekeeping CPUs; if nothing remains the write is rejected.  The new
 * map is published with rcu_assign_pointer() under a local mutex, the
 * rps_needed static branch is bumped/dropped to match, and the old map
 * is freed after a grace period via kfree_rcu().
 */
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		/* Drop isolated CPUs; an all-isolated request is an error. */
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	/* Only currently-online CPUs make it into the map. */
	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		/* Empty result: store NULL to disable RPS on this queue. */
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

/* Print the RPS flow table size ("rps_flow_cnt"); 0 when no table. */
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

/* RCU callback: free a retired flow table (vmalloc'ed, hence vfree). */
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

/* Resize (or remove, on count==0) the queue's RPS flow table.
 *
 * Requires CAP_NET_ADMIN.  The requested count is rounded up to a power
 * of two (stored as mask = size - 1), with explicit overflow checks for
 * both 64-bit (mask must fit u32) and 32-bit (allocation size must not
 * overflow).  The table swap is done under a local spinlock and the old
 * table is released after a grace period.
 */
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		/* Fresh table starts with every flow unassigned. */
		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

/* Per-rx-queue attributes; only RPS contributes entries here. */
static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

/* kobject release: tear down RPS state and drop the device reference
 * taken in rx_queue_add_kobject().  The kobj is zeroed because the
 * netdev_rx_queue it is embedded in may be reused.
 */
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	/* No concurrent updaters at release time, hence the "1" condition. */
	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

/* Report the network namespace the queue's device belongs to, via the
 * device class's namespace hook (when the class provides one).
 */
static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

/* Map the owning namespace to the uid/gid sysfs files should be owned by. */
static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

/* Register the sysfs kobject ("rx-<index>") for one rx queue.
 * On any failure the kobject_put() in the error path triggers
 * rx_queue_release(), which drops the dev_hold() taken up front.
 */
static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}

/* Re-own one rx queue's sysfs tree (kobject plus optional driver group)
 * after a namespace ownership change.
 */
static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */
/* Grow or shrink the set of registered rx-queue kobjects to new_num.
 * On a failed add, new_num is clamped back to old_num so the cleanup
 * loop below removes the partially-added queues; shrinking removes
 * queues from the top down.  Uevents are suppressed when the netns is
 * already dying (refcount hit zero).
 */
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	/* Without RPS there is nothing to expose unless the driver
	 * supplied its own per-queue attribute group.
	 */
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

/* Change sysfs ownership for the first @num rx queues; stops at the
 * first failure and returns that error.
 */
static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

/* sysfs_ops .show dispatcher for tx-queue attributes. */
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

/* sysfs_ops .store dispatcher for tx-queue attributes. */
static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

/* Report the queue's transmit-timeout counter, sampled under the
 * queue's xmit lock so the read is consistent with updaters.
 */
static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, fmt_ulong, trans_timeout);
}

/* Recover a tx queue's index from its position in dev->_tx[]. */
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

/* Report which traffic class this tx queue maps to.  Needs RTNL to read
 * a stable TC config; rtnl_trylock()/restart_syscall() avoids blocking
 * in sysfs.  -ENOENT for single-queue devices, -EINVAL when the queue
 * has no TC mapping.
 */
static ssize_t traffic_class_show(struct netdev_queue *queue,
				  char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc;
	int index;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	if (!rtnl_trylock())
		return restart_syscall();

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2".  If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return num_tc < 0 ? sprintf(buf, "%d%d\n", tc, num_tc) :
			    sprintf(buf, "%d\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct netdev_queue *queue,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

/* Set a per-queue transmit rate limit via the driver's
 * ndo_set_tx_maxrate hook; -EOPNOTSUPP if the driver has none.
 * Requires CAP_NET_ADMIN and RTNL (trylock + restart, as above).
 */
static ssize_t tx_maxrate_store(struct netdev_queue *queue,
				const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		/* Cache the accepted rate so tx_maxrate_show() reports it. */
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
/* Format one unsigned BQL value. */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

/* Parse one BQL value; the literal "max" means DQL_MAX_LIMIT. */
static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

/* hold_time is stored in jiffies but exposed in milliseconds. */
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

/* Bytes queued but not yet completed (read-only). */
static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

/* Generate a read/write attribute pair around one struct dql field. */
#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);

static struct attribute *dql_attrs[] __ro_after_init = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

/* Appears as the byte_queue_limits/ directory under each tx queue. */
static const struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
/* NOTE(review): body of xps_cpus_show() continues past this chunk;
 * only its declarations are visible here.
 */
static ssize_t xps_cpus_show(struct netdev_queue *queue,
			     char *buf)
{
	int cpu, len, ret, num_tc = 1, tc = 0;
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	unsigned long *mask;
	unsigned int index;
13721d24eb48STom Herbert 1373d7be9775SAlexander Duyck if (!netif_is_multiqueue(dev)) 1374d7be9775SAlexander Duyck return -ENOENT; 1375d7be9775SAlexander Duyck 13761d24eb48STom Herbert index = get_netdev_queue_index(queue); 13771d24eb48STom Herbert 1378fb250385SAntoine Tenart if (!rtnl_trylock()) 1379fb250385SAntoine Tenart return restart_syscall(); 1380fb250385SAntoine Tenart 1381184c449fSAlexander Duyck if (dev->num_tc) { 1382ffcfe25bSAlexander Duyck /* Do not allow XPS on subordinate device directly */ 1383184c449fSAlexander Duyck num_tc = dev->num_tc; 1384fb250385SAntoine Tenart if (num_tc < 0) { 1385fb250385SAntoine Tenart ret = -EINVAL; 1386fb250385SAntoine Tenart goto err_rtnl_unlock; 1387fb250385SAntoine Tenart } 1388ffcfe25bSAlexander Duyck 1389ffcfe25bSAlexander Duyck /* If queue belongs to subordinate dev use its map */ 1390ffcfe25bSAlexander Duyck dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 1391ffcfe25bSAlexander Duyck 1392184c449fSAlexander Duyck tc = netdev_txq_to_tc(dev, index); 1393fb250385SAntoine Tenart if (tc < 0) { 1394fb250385SAntoine Tenart ret = -EINVAL; 1395fb250385SAntoine Tenart goto err_rtnl_unlock; 1396fb250385SAntoine Tenart } 1397184c449fSAlexander Duyck } 1398184c449fSAlexander Duyck 1399ea4fe7e8SAntoine Tenart mask = bitmap_zalloc(nr_cpu_ids, GFP_KERNEL); 1400ea4fe7e8SAntoine Tenart if (!mask) { 1401fb250385SAntoine Tenart ret = -ENOMEM; 1402fb250385SAntoine Tenart goto err_rtnl_unlock; 1403fb250385SAntoine Tenart } 1404664088f8SAlexander Duyck 14051d24eb48STom Herbert rcu_read_lock(); 140680d19669SAmritha Nambiar dev_maps = rcu_dereference(dev->xps_cpus_map); 14071d24eb48STom Herbert if (dev_maps) { 1408184c449fSAlexander Duyck for_each_possible_cpu(cpu) { 1409184c449fSAlexander Duyck int i, tci = cpu * num_tc + tc; 1410184c449fSAlexander Duyck struct xps_map *map; 1411184c449fSAlexander Duyck 141280d19669SAmritha Nambiar map = rcu_dereference(dev_maps->attr_map[tci]); 1413184c449fSAlexander Duyck if (!map) 
1414184c449fSAlexander Duyck continue; 1415184c449fSAlexander Duyck 1416184c449fSAlexander Duyck for (i = map->len; i--;) { 1417184c449fSAlexander Duyck if (map->queues[i] == index) { 1418ea4fe7e8SAntoine Tenart set_bit(cpu, mask); 14191d24eb48STom Herbert break; 14201d24eb48STom Herbert } 14211d24eb48STom Herbert } 14221d24eb48STom Herbert } 14231d24eb48STom Herbert } 14241d24eb48STom Herbert rcu_read_unlock(); 14251d24eb48STom Herbert 1426fb250385SAntoine Tenart rtnl_unlock(); 1427fb250385SAntoine Tenart 1428ea4fe7e8SAntoine Tenart len = bitmap_print_to_pagebuf(false, buf, mask, nr_cpu_ids); 1429ea4fe7e8SAntoine Tenart bitmap_free(mask); 1430f0906827STejun Heo return len < PAGE_SIZE ? len : -EINVAL; 1431fb250385SAntoine Tenart 1432fb250385SAntoine Tenart err_rtnl_unlock: 1433fb250385SAntoine Tenart rtnl_unlock(); 1434fb250385SAntoine Tenart return ret; 14351d24eb48STom Herbert } 14361d24eb48STom Herbert 14372b9c7581Sstephen hemminger static ssize_t xps_cpus_store(struct netdev_queue *queue, 14381d24eb48STom Herbert const char *buf, size_t len) 14391d24eb48STom Herbert { 14401d24eb48STom Herbert struct net_device *dev = queue->dev; 1441*d9a063d2SAntoine Tenart unsigned int index; 1442537c00deSAlexander Duyck cpumask_var_t mask; 1443537c00deSAlexander Duyck int err; 14441d24eb48STom Herbert 1445d7be9775SAlexander Duyck if (!netif_is_multiqueue(dev)) 1446d7be9775SAlexander Duyck return -ENOENT; 1447d7be9775SAlexander Duyck 14481d24eb48STom Herbert if (!capable(CAP_NET_ADMIN)) 14491d24eb48STom Herbert return -EPERM; 14501d24eb48STom Herbert 14511d24eb48STom Herbert if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 14521d24eb48STom Herbert return -ENOMEM; 14531d24eb48STom Herbert 14541d24eb48STom Herbert index = get_netdev_queue_index(queue); 14551d24eb48STom Herbert 14561d24eb48STom Herbert err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); 14571d24eb48STom Herbert if (err) { 14581d24eb48STom Herbert free_cpumask_var(mask); 14591d24eb48STom Herbert return 
		       err;
	}

	/* don't sleep on RTNL: have the syscall restarted instead */
	if (!rtnl_trylock()) {
		free_cpumask_var(mask);
		return restart_syscall();
	}

	err = netif_set_xps_queue(dev, mask, index);
	rtnl_unlock();

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
	= __ATTR_RW(xps_cpus);

/*
 * "xps_rxqs" read: print the mask of rx queues currently mapped to this
 * tx queue (rx-queue-based XPS).
 */
static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
	int j, len, ret, num_tc = 1, tc = 0;
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	unsigned long *mask;
	unsigned int index;

	index = get_netdev_queue_index(queue);

	/* don't sleep on RTNL: have the syscall restarted instead */
	if (!rtnl_trylock())
		return restart_syscall();

	if (dev->num_tc) {
		num_tc = dev->num_tc;
		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0) {
			ret = -EINVAL;
			goto err_rtnl_unlock;
		}
	}
	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask) {
		ret = -ENOMEM;
		goto err_rtnl_unlock;
	}

	/* walk the RCU-protected rxqs map, marking rx queues that target @index */
	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_rxqs_map);
	if (!dev_maps)
		goto out_no_maps;

	for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
	     j < dev->num_rx_queues;) {
		int i, tci = j * num_tc + tc;
		struct xps_map *map;

		map = rcu_dereference(dev_maps->attr_map[tci]);
		if (!map)
			continue;

		for (i = map->len; i--;) {
			if (map->queues[i] == index) {
				set_bit(j, mask);
				break;
			}
		}
	}
out_no_maps:
	rcu_read_unlock();

	rtnl_unlock();

	len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
	bitmap_free(mask);

	return len < PAGE_SIZE ? len : -EINVAL;

err_rtnl_unlock:
	rtnl_unlock();
	return ret;
}

/*
 * "xps_rxqs" write: parse an rx-queue mask and install it for this tx
 * queue.  Requires CAP_NET_ADMIN in the device's network namespace.
 */
static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
			      size_t len)
{
	struct net_device *dev = queue->dev;
	struct net *net = dev_net(dev);
	unsigned long *mask;
	unsigned int index;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
	if (err) {
		bitmap_free(mask);
		return err;
	}

	/* don't sleep on RTNL: have the syscall restarted instead */
	if (!rtnl_trylock()) {
		bitmap_free(mask);
		return restart_syscall();
	}

	cpus_read_lock();
	err = __netif_set_xps_queue(dev, mask, index, true);
	cpus_read_unlock();

	rtnl_unlock();

	bitmap_free(mask);
	return err ? : len;
}

static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
	= __ATTR_RW(xps_rxqs);
#endif /* CONFIG_XPS */

/* Default sysfs attributes present on every tx-queue directory. */
static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
	&queue_trans_timeout.attr,
	&queue_traffic_class.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&xps_rxqs_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(netdev_queue_default);

/* Last reference to the queue kobject is gone: clear it and drop the
 * dev refcount taken when the kobject was added.
 */
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

/* Resolve the (network) namespace tag of a tx-queue kobject via its
 * owning device's class.
 */
static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

/* Report uid/gid ownership of a tx-queue kobject from its netns. */
static void netdev_queue_get_ownership(struct kobject *kobj,
				       kuid_t *uid, kgid_t *gid)
{
	const struct net *net = netdev_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type netdev_queue_ktype __ro_after_init = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_groups = netdev_queue_default_groups,
	.namespace = netdev_queue_namespace,
	.get_ownership = netdev_queue_get_ownership,
};

/* Create and register the "tx-<index>" kobject for one tx queue,
 * including its BQL attribute group when CONFIG_BQL is set.
 */
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Kobject_put later will trigger netdev_queue_release call
	 * which decreases dev refcount: Take that reference here
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto err;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err:
	/* kobject_put also releases the dev_hold taken above */
	kobject_put(kobj);
	return error;
}

/* Change sysfs ownership of one tx queue (and its BQL group). */
static int tx_queue_change_owner(struct net_device *ndev, int index,
				 kuid_t kuid, kgid_t kgid)
{
	struct netdev_queue *queue = ndev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

#ifdef CONFIG_BQL
	error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
#endif
	return error;
}
#endif /* CONFIG_SYSFS */

/* Grow or shrink the set of registered tx-queue kobjects from @old_num
 * to @new_num.  On a failure while growing, newly added kobjects are
 * rolled back and the error is returned.
 */
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		/* suppress uevents while the netns is being dismantled */
		if (!refcount_read(&dev_net(dev)->ns.count))
			queue->kobj.uevent_suppress = 1;
#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

/* Change sysfs ownership of the first @num tx queues; stops at the
 * first failure.
 */
static int net_tx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

	for (i = 0; i < num; i++) {
		error = tx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

/* Create the "queues" kset and the per-queue rx/tx kobjects for a
 * device; undoes everything on failure.
 */
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

/* Change sysfs ownership of the "queues" kset and all rx/tx queues. */
static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
{
	int error = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	if (ndev->queues_kset) {
		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
		if (error)
			return error;
	}
	real_rx = ndev->real_num_rx_queues;
#endif
	real_tx = ndev->real_num_tx_queues;

	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
	if (error)
		return error;

	error = net_tx_queue_change_owner(ndev,
					  real_tx, kuid, kgid);
	if (error)
		return error;

	return 0;
}

/* Tear down all rx/tx queue kobjects and the "queues" kset. */
static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

/* May the current task mount sysfs tagged with this netns type?
 * Requires CAP_SYS_ADMIN in the current netns's owning user namespace.
 */
static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

/* Take a passive reference on the current task's netns for kobj tagging. */
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

/* Namespace tag for a netlink socket: the netns it belongs to. */
static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

/* Add INTERFACE= and IFINDEX= variables to a net device uevent. */
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	netdev_freemem(dev);
}

/* Namespace tag of a net device: the netns it lives in. */
static const void *net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

/* uid/gid ownership of a net device, derived from its netns. */
static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
{
	struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

/* The "net" device class backing /sys/class/net. */
static struct class net_class __ro_after_init = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF_NET
/* Match a device, or any of its parents, against an OF device node. */
static int of_dev_node_match(struct device *dev, const void *data)
{
	for (; dev; dev = dev->parent) {
		if (dev->of_node == data)
			return 1;
	}

	return 0;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	/* suppress uevents while the netns is being dismantled */
	if (!refcount_read(&dev_net(ndev)->ns.count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

/* Change owner for sysfs entries when moving network devices across network
 * namespaces owned by different user namespaces.
 */
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
			const struct net *net_new)
{
	struct device *dev = &ndev->dev;
	kuid_t old_uid, new_uid;
	kgid_t old_gid, new_gid;
	int error;

	net_ns_get_ownership(net_old, &old_uid, &old_gid);
	net_ns_get_ownership(net_new, &new_uid, &new_gid);

	/* The network namespace was changed but the owning user namespace is
	 * identical so there's no need to change the owner of sysfs entries.
	 */
	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
		return 0;

	error = device_change_owner(dev, new_uid, new_gid);
	if (error)
		return error;

	error = queue_change_owner(ndev, new_uid, new_gid);
	if (error)
		return error;

	return 0;
}

/* Create a class attribute file under /sys/class/net, ns-tagged. */
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

/* Remove a class attribute file created with netdev_class_create_file_ns(). */
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

/* Boot-time init: register the netns kobject namespace type and the
 * "net" device class.
 */
int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}