12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * net-sysfs.c - network device class and attributes 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org> 61da177e4SLinus Torvalds */ 71da177e4SLinus Torvalds 84fc268d2SRandy Dunlap #include <linux/capability.h> 91da177e4SLinus Torvalds #include <linux/kernel.h> 101da177e4SLinus Torvalds #include <linux/netdevice.h> 111da177e4SLinus Torvalds #include <linux/if_arp.h> 125a0e3ad6STejun Heo #include <linux/slab.h> 13174cd4b1SIngo Molnar #include <linux/sched/signal.h> 1407bbecb3SAlex Belits #include <linux/sched/isolation.h> 15608b4b95SEric W. Biederman #include <linux/nsproxy.h> 161da177e4SLinus Torvalds #include <net/sock.h> 17608b4b95SEric W. Biederman #include <net/net_namespace.h> 181da177e4SLinus Torvalds #include <linux/rtnetlink.h> 19fec5e652STom Herbert #include <linux/vmalloc.h> 20bc3b2d7fSPaul Gortmaker #include <linux/export.h> 21114cf580STom Herbert #include <linux/jiffies.h> 229802c8e2SMing Lei #include <linux/pm_runtime.h> 23aa836df9SFlorian Fainelli #include <linux/of.h> 2488832a22SBen Dooks #include <linux/of_net.h> 254d99f660SAndrei Vagin #include <linux/cpu.h> 261da177e4SLinus Torvalds 27342709efSPavel Emelyanov #include "net-sysfs.h" 28342709efSPavel Emelyanov 298b41d188SEric W. 
#ifdef CONFIG_SYSFS
/* printf-style formats shared by the simple attribute show() helpers below */
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

/* True while the device has not yet been unregistered (sysfs files may
 * outlive the netdevice, so every accessor below re-checks this).
 */
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* dev_base_lock keeps the device from being unregistered while the
	 * formatter reads its fields.
	 */
	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	/* capability is checked in the device's network namespace */
	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	/* restart the syscall rather than sleep on rtnl_lock, so a signal
	 * can interrupt a writer blocked behind a long rtnl hold
	 */
	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;	/* success: report full write */
	}
	rtnl_unlock();
err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

/* iflink may differ from ifindex for e.g. stacked devices; ask the driver */
static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

/* name_assign_type reads as an error until the type is actually known */
static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

/* set helper for the "carrier" attribute; only valid on a running device */
static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* rtnl protects the ethtool query; restart rather than sleep */
	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

/* indexed by IF_OPER_* operstate values */
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	/* a device that is not running is always reported as down */
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

/* total carrier transitions, both directions */
static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	/* NOTE(review): unlike netdev_store()'s ns_capable() check, this is
	 * a global capable() check — tx_queue_len requires init-ns admin.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	/* WRITE_ONCE pairs with lockless readers of gro_flush_timeout */
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		/* tell userspace listeners the alias changed */
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
/* exposed as "netdev_group" to avoid clashing with the device core's group */
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev))
		ret = sprintf(buf, fmt_dec, netdev->threaded);

	rtnl_unlock();
	return ret;
}

/* set helper for "threaded": only 0/1 accepted, and only when the device
 * has at least one NAPI instance to thread
 */
static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = dev_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

/* attributes visible under /sys/class/net/<iface>/ */
static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given an attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	/* offset must name a u64 field inside rtnl_link_stats64 */
	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
6511da177e4SLinus Torvalds NETSTAT_ENTRY(rx_bytes); 6521da177e4SLinus Torvalds NETSTAT_ENTRY(tx_bytes); 6531da177e4SLinus Torvalds NETSTAT_ENTRY(rx_errors); 6541da177e4SLinus Torvalds NETSTAT_ENTRY(tx_errors); 6551da177e4SLinus Torvalds NETSTAT_ENTRY(rx_dropped); 6561da177e4SLinus Torvalds NETSTAT_ENTRY(tx_dropped); 6571da177e4SLinus Torvalds NETSTAT_ENTRY(multicast); 6581da177e4SLinus Torvalds NETSTAT_ENTRY(collisions); 6591da177e4SLinus Torvalds NETSTAT_ENTRY(rx_length_errors); 6601da177e4SLinus Torvalds NETSTAT_ENTRY(rx_over_errors); 6611da177e4SLinus Torvalds NETSTAT_ENTRY(rx_crc_errors); 6621da177e4SLinus Torvalds NETSTAT_ENTRY(rx_frame_errors); 6631da177e4SLinus Torvalds NETSTAT_ENTRY(rx_fifo_errors); 6641da177e4SLinus Torvalds NETSTAT_ENTRY(rx_missed_errors); 6651da177e4SLinus Torvalds NETSTAT_ENTRY(tx_aborted_errors); 6661da177e4SLinus Torvalds NETSTAT_ENTRY(tx_carrier_errors); 6671da177e4SLinus Torvalds NETSTAT_ENTRY(tx_fifo_errors); 6681da177e4SLinus Torvalds NETSTAT_ENTRY(tx_heartbeat_errors); 6691da177e4SLinus Torvalds NETSTAT_ENTRY(tx_window_errors); 6701da177e4SLinus Torvalds NETSTAT_ENTRY(rx_compressed); 6711da177e4SLinus Torvalds NETSTAT_ENTRY(tx_compressed); 6726e7333d3SJarod Wilson NETSTAT_ENTRY(rx_nohandler); 6731da177e4SLinus Torvalds 674ec6cc599Sstephen hemminger static struct attribute *netstat_attrs[] __ro_after_init = { 67543cb76d9SGreg Kroah-Hartman &dev_attr_rx_packets.attr, 67643cb76d9SGreg Kroah-Hartman &dev_attr_tx_packets.attr, 67743cb76d9SGreg Kroah-Hartman &dev_attr_rx_bytes.attr, 67843cb76d9SGreg Kroah-Hartman &dev_attr_tx_bytes.attr, 67943cb76d9SGreg Kroah-Hartman &dev_attr_rx_errors.attr, 68043cb76d9SGreg Kroah-Hartman &dev_attr_tx_errors.attr, 68143cb76d9SGreg Kroah-Hartman &dev_attr_rx_dropped.attr, 68243cb76d9SGreg Kroah-Hartman &dev_attr_tx_dropped.attr, 68343cb76d9SGreg Kroah-Hartman &dev_attr_multicast.attr, 68443cb76d9SGreg Kroah-Hartman &dev_attr_collisions.attr, 68543cb76d9SGreg Kroah-Hartman 
&dev_attr_rx_length_errors.attr, 68643cb76d9SGreg Kroah-Hartman &dev_attr_rx_over_errors.attr, 68743cb76d9SGreg Kroah-Hartman &dev_attr_rx_crc_errors.attr, 68843cb76d9SGreg Kroah-Hartman &dev_attr_rx_frame_errors.attr, 68943cb76d9SGreg Kroah-Hartman &dev_attr_rx_fifo_errors.attr, 69043cb76d9SGreg Kroah-Hartman &dev_attr_rx_missed_errors.attr, 69143cb76d9SGreg Kroah-Hartman &dev_attr_tx_aborted_errors.attr, 69243cb76d9SGreg Kroah-Hartman &dev_attr_tx_carrier_errors.attr, 69343cb76d9SGreg Kroah-Hartman &dev_attr_tx_fifo_errors.attr, 69443cb76d9SGreg Kroah-Hartman &dev_attr_tx_heartbeat_errors.attr, 69543cb76d9SGreg Kroah-Hartman &dev_attr_tx_window_errors.attr, 69643cb76d9SGreg Kroah-Hartman &dev_attr_rx_compressed.attr, 69743cb76d9SGreg Kroah-Hartman &dev_attr_tx_compressed.attr, 6986e7333d3SJarod Wilson &dev_attr_rx_nohandler.attr, 6991da177e4SLinus Torvalds NULL 7001da177e4SLinus Torvalds }; 7011da177e4SLinus Torvalds 70238ef00ccSArvind Yadav static const struct attribute_group netstat_group = { 7031da177e4SLinus Torvalds .name = "statistics", 7041da177e4SLinus Torvalds .attrs = netstat_attrs, 7051da177e4SLinus Torvalds }; 70638c1a01cSJohannes Berg 70738c1a01cSJohannes Berg #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) 70838c1a01cSJohannes Berg static struct attribute *wireless_attrs[] = { 70938c1a01cSJohannes Berg NULL 71038c1a01cSJohannes Berg }; 71138c1a01cSJohannes Berg 71238ef00ccSArvind Yadav static const struct attribute_group wireless_group = { 71338c1a01cSJohannes Berg .name = "wireless", 71438c1a01cSJohannes Berg .attrs = wireless_attrs, 71538c1a01cSJohannes Berg }; 71638c1a01cSJohannes Berg #endif 7176be8aeefSGreg Kroah-Hartman 7186be8aeefSGreg Kroah-Hartman #else /* CONFIG_SYSFS */ 7196be8aeefSGreg Kroah-Hartman #define net_class_groups NULL 720d6523ddfSEric W. 
Biederman #endif /* CONFIG_SYSFS */ 7211da177e4SLinus Torvalds 722a953be53SMichael Dalton #ifdef CONFIG_SYSFS 7236648c65eSstephen hemminger #define to_rx_queue_attr(_attr) \ 7246648c65eSstephen hemminger container_of(_attr, struct rx_queue_attribute, attr) 7250a9627f2STom Herbert 7260a9627f2STom Herbert #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) 7270a9627f2STom Herbert 7280a9627f2STom Herbert static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, 7290a9627f2STom Herbert char *buf) 7300a9627f2STom Herbert { 731667e427bSstephen hemminger const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); 7320a9627f2STom Herbert struct netdev_rx_queue *queue = to_rx_queue(kobj); 7330a9627f2STom Herbert 7340a9627f2STom Herbert if (!attribute->show) 7350a9627f2STom Herbert return -EIO; 7360a9627f2STom Herbert 737718ad681Sstephen hemminger return attribute->show(queue, buf); 7380a9627f2STom Herbert } 7390a9627f2STom Herbert 7400a9627f2STom Herbert static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, 7410a9627f2STom Herbert const char *buf, size_t count) 7420a9627f2STom Herbert { 743667e427bSstephen hemminger const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); 7440a9627f2STom Herbert struct netdev_rx_queue *queue = to_rx_queue(kobj); 7450a9627f2STom Herbert 7460a9627f2STom Herbert if (!attribute->store) 7470a9627f2STom Herbert return -EIO; 7480a9627f2STom Herbert 749718ad681Sstephen hemminger return attribute->store(queue, buf, count); 7500a9627f2STom Herbert } 7510a9627f2STom Herbert 752fa50d645Sstephen hemminger static const struct sysfs_ops rx_queue_sysfs_ops = { 7530a9627f2STom Herbert .show = rx_queue_attr_show, 7540a9627f2STom Herbert .store = rx_queue_attr_store, 7550a9627f2STom Herbert }; 7560a9627f2STom Herbert 757a953be53SMichael Dalton #ifdef CONFIG_RPS 758718ad681Sstephen hemminger static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf) 
7590a9627f2STom Herbert { 7600a9627f2STom Herbert struct rps_map *map; 7610a9627f2STom Herbert cpumask_var_t mask; 762f0906827STejun Heo int i, len; 7630a9627f2STom Herbert 7640a9627f2STom Herbert if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 7650a9627f2STom Herbert return -ENOMEM; 7660a9627f2STom Herbert 7670a9627f2STom Herbert rcu_read_lock(); 7680a9627f2STom Herbert map = rcu_dereference(queue->rps_map); 7690a9627f2STom Herbert if (map) 7700a9627f2STom Herbert for (i = 0; i < map->len; i++) 7710a9627f2STom Herbert cpumask_set_cpu(map->cpus[i], mask); 7720a9627f2STom Herbert 773f0906827STejun Heo len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); 7740a9627f2STom Herbert rcu_read_unlock(); 7750a9627f2STom Herbert free_cpumask_var(mask); 7760a9627f2STom Herbert 777f0906827STejun Heo return len < PAGE_SIZE ? len : -EINVAL; 7780a9627f2STom Herbert } 7790a9627f2STom Herbert 780f5acb907SEric Dumazet static ssize_t store_rps_map(struct netdev_rx_queue *queue, 7810a9627f2STom Herbert const char *buf, size_t len) 7820a9627f2STom Herbert { 7830a9627f2STom Herbert struct rps_map *old_map, *map; 7840a9627f2STom Herbert cpumask_var_t mask; 78507bbecb3SAlex Belits int err, cpu, i, hk_flags; 786da65ad1fSSasha Levin static DEFINE_MUTEX(rps_map_mutex); 7870a9627f2STom Herbert 7880a9627f2STom Herbert if (!capable(CAP_NET_ADMIN)) 7890a9627f2STom Herbert return -EPERM; 7900a9627f2STom Herbert 7910a9627f2STom Herbert if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 7920a9627f2STom Herbert return -ENOMEM; 7930a9627f2STom Herbert 7940a9627f2STom Herbert err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); 7950a9627f2STom Herbert if (err) { 7960a9627f2STom Herbert free_cpumask_var(mask); 7970a9627f2STom Herbert return err; 7980a9627f2STom Herbert } 7990a9627f2STom Herbert 8002e0d8fefSEric Dumazet if (!cpumask_empty(mask)) { 80107bbecb3SAlex Belits hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ; 80207bbecb3SAlex Belits cpumask_and(mask, mask, housekeeping_cpumask(hk_flags)); 
80307bbecb3SAlex Belits if (cpumask_empty(mask)) { 80407bbecb3SAlex Belits free_cpumask_var(mask); 80507bbecb3SAlex Belits return -EINVAL; 80607bbecb3SAlex Belits } 8072e0d8fefSEric Dumazet } 80807bbecb3SAlex Belits 80995c96174SEric Dumazet map = kzalloc(max_t(unsigned int, 8100a9627f2STom Herbert RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), 8110a9627f2STom Herbert GFP_KERNEL); 8120a9627f2STom Herbert if (!map) { 8130a9627f2STom Herbert free_cpumask_var(mask); 8140a9627f2STom Herbert return -ENOMEM; 8150a9627f2STom Herbert } 8160a9627f2STom Herbert 8170a9627f2STom Herbert i = 0; 8180a9627f2STom Herbert for_each_cpu_and(cpu, mask, cpu_online_mask) 8190a9627f2STom Herbert map->cpus[i++] = cpu; 8200a9627f2STom Herbert 8216648c65eSstephen hemminger if (i) { 8220a9627f2STom Herbert map->len = i; 8236648c65eSstephen hemminger } else { 8240a9627f2STom Herbert kfree(map); 8250a9627f2STom Herbert map = NULL; 8260a9627f2STom Herbert } 8270a9627f2STom Herbert 828da65ad1fSSasha Levin mutex_lock(&rps_map_mutex); 8296e3f7fafSEric Dumazet old_map = rcu_dereference_protected(queue->rps_map, 830da65ad1fSSasha Levin mutex_is_locked(&rps_map_mutex)); 8310a9627f2STom Herbert rcu_assign_pointer(queue->rps_map, map); 8320a9627f2STom Herbert 833adc9300eSEric Dumazet if (map) 834dc05360fSEric Dumazet static_branch_inc(&rps_needed); 83510e4ea75STom Herbert if (old_map) 836dc05360fSEric Dumazet static_branch_dec(&rps_needed); 83710e4ea75STom Herbert 838da65ad1fSSasha Levin mutex_unlock(&rps_map_mutex); 83910e4ea75STom Herbert 84010e4ea75STom Herbert if (old_map) 84110e4ea75STom Herbert kfree_rcu(old_map, rcu); 84210e4ea75STom Herbert 8430a9627f2STom Herbert free_cpumask_var(mask); 8440a9627f2STom Herbert return len; 8450a9627f2STom Herbert } 8460a9627f2STom Herbert 847fec5e652STom Herbert static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, 848fec5e652STom Herbert char *buf) 849fec5e652STom Herbert { 850fec5e652STom Herbert struct rps_dev_flow_table *flow_table; 
85160b778ceSEric Dumazet unsigned long val = 0; 852fec5e652STom Herbert 853fec5e652STom Herbert rcu_read_lock(); 854fec5e652STom Herbert flow_table = rcu_dereference(queue->rps_flow_table); 855fec5e652STom Herbert if (flow_table) 85660b778ceSEric Dumazet val = (unsigned long)flow_table->mask + 1; 857fec5e652STom Herbert rcu_read_unlock(); 858fec5e652STom Herbert 85960b778ceSEric Dumazet return sprintf(buf, "%lu\n", val); 860fec5e652STom Herbert } 861fec5e652STom Herbert 862fec5e652STom Herbert static void rps_dev_flow_table_release(struct rcu_head *rcu) 863fec5e652STom Herbert { 864fec5e652STom Herbert struct rps_dev_flow_table *table = container_of(rcu, 865fec5e652STom Herbert struct rps_dev_flow_table, rcu); 866243198d0SAl Viro vfree(table); 867fec5e652STom Herbert } 868fec5e652STom Herbert 869f5acb907SEric Dumazet static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, 870fec5e652STom Herbert const char *buf, size_t len) 871fec5e652STom Herbert { 87260b778ceSEric Dumazet unsigned long mask, count; 873fec5e652STom Herbert struct rps_dev_flow_table *table, *old_table; 874fec5e652STom Herbert static DEFINE_SPINLOCK(rps_dev_flow_lock); 87560b778ceSEric Dumazet int rc; 876fec5e652STom Herbert 877fec5e652STom Herbert if (!capable(CAP_NET_ADMIN)) 878fec5e652STom Herbert return -EPERM; 879fec5e652STom Herbert 88060b778ceSEric Dumazet rc = kstrtoul(buf, 0, &count); 88160b778ceSEric Dumazet if (rc < 0) 88260b778ceSEric Dumazet return rc; 883fec5e652STom Herbert 884fec5e652STom Herbert if (count) { 88560b778ceSEric Dumazet mask = count - 1; 88660b778ceSEric Dumazet /* mask = roundup_pow_of_two(count) - 1; 88760b778ceSEric Dumazet * without overflows... 
88860b778ceSEric Dumazet */ 88960b778ceSEric Dumazet while ((mask | (mask >> 1)) != mask) 89060b778ceSEric Dumazet mask |= (mask >> 1); 89160b778ceSEric Dumazet /* On 64 bit arches, must check mask fits in table->mask (u32), 8928e3bff96Sstephen hemminger * and on 32bit arches, must check 8938e3bff96Sstephen hemminger * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow. 89460b778ceSEric Dumazet */ 89560b778ceSEric Dumazet #if BITS_PER_LONG > 32 89660b778ceSEric Dumazet if (mask > (unsigned long)(u32)mask) 897a0a129f8SXi Wang return -EINVAL; 89860b778ceSEric Dumazet #else 89960b778ceSEric Dumazet if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1)) 900a0a129f8SXi Wang / sizeof(struct rps_dev_flow)) { 901fec5e652STom Herbert /* Enforce a limit to prevent overflow */ 902fec5e652STom Herbert return -EINVAL; 903fec5e652STom Herbert } 90460b778ceSEric Dumazet #endif 90560b778ceSEric Dumazet table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1)); 906fec5e652STom Herbert if (!table) 907fec5e652STom Herbert return -ENOMEM; 908fec5e652STom Herbert 90960b778ceSEric Dumazet table->mask = mask; 91060b778ceSEric Dumazet for (count = 0; count <= mask; count++) 91160b778ceSEric Dumazet table->flows[count].cpu = RPS_NO_CPU; 9126648c65eSstephen hemminger } else { 913fec5e652STom Herbert table = NULL; 9146648c65eSstephen hemminger } 915fec5e652STom Herbert 916fec5e652STom Herbert spin_lock(&rps_dev_flow_lock); 9176e3f7fafSEric Dumazet old_table = rcu_dereference_protected(queue->rps_flow_table, 9186e3f7fafSEric Dumazet lockdep_is_held(&rps_dev_flow_lock)); 919fec5e652STom Herbert rcu_assign_pointer(queue->rps_flow_table, table); 920fec5e652STom Herbert spin_unlock(&rps_dev_flow_lock); 921fec5e652STom Herbert 922fec5e652STom Herbert if (old_table) 923fec5e652STom Herbert call_rcu(&old_table->rcu, rps_dev_flow_table_release); 924fec5e652STom Herbert 925fec5e652STom Herbert return len; 926fec5e652STom Herbert } 927fec5e652STom Herbert 928667e427bSstephen hemminger static struct 
rx_queue_attribute rps_cpus_attribute __ro_after_init 929d6444062SJoe Perches = __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map); 9300a9627f2STom Herbert 931667e427bSstephen hemminger static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init 932d6444062SJoe Perches = __ATTR(rps_flow_cnt, 0644, 933fec5e652STom Herbert show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); 934a953be53SMichael Dalton #endif /* CONFIG_RPS */ 935fec5e652STom Herbert 936667e427bSstephen hemminger static struct attribute *rx_queue_default_attrs[] __ro_after_init = { 937a953be53SMichael Dalton #ifdef CONFIG_RPS 9380a9627f2STom Herbert &rps_cpus_attribute.attr, 939fec5e652STom Herbert &rps_dev_flow_table_cnt_attribute.attr, 940a953be53SMichael Dalton #endif 9410a9627f2STom Herbert NULL 9420a9627f2STom Herbert }; 943be0d6926SKimberly Brown ATTRIBUTE_GROUPS(rx_queue_default); 9440a9627f2STom Herbert 9450a9627f2STom Herbert static void rx_queue_release(struct kobject *kobj) 9460a9627f2STom Herbert { 9470a9627f2STom Herbert struct netdev_rx_queue *queue = to_rx_queue(kobj); 948a953be53SMichael Dalton #ifdef CONFIG_RPS 9496e3f7fafSEric Dumazet struct rps_map *map; 9506e3f7fafSEric Dumazet struct rps_dev_flow_table *flow_table; 9510a9627f2STom Herbert 95233d480ceSEric Dumazet map = rcu_dereference_protected(queue->rps_map, 1); 9539ea19481SJohn Fastabend if (map) { 9549ea19481SJohn Fastabend RCU_INIT_POINTER(queue->rps_map, NULL); 955f6f80238SLai Jiangshan kfree_rcu(map, rcu); 9569ea19481SJohn Fastabend } 9576e3f7fafSEric Dumazet 95833d480ceSEric Dumazet flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); 9599ea19481SJohn Fastabend if (flow_table) { 9609ea19481SJohn Fastabend RCU_INIT_POINTER(queue->rps_flow_table, NULL); 9616e3f7fafSEric Dumazet call_rcu(&flow_table->rcu, rps_dev_flow_table_release); 9629ea19481SJohn Fastabend } 963a953be53SMichael Dalton #endif 9640a9627f2STom Herbert 9659ea19481SJohn Fastabend memset(kobj, 0, sizeof(*kobj)); 
966fe822240STom Herbert dev_put(queue->dev); 9670a9627f2STom Herbert } 9680a9627f2STom Herbert 96982ef3d5dSWeilong Chen static const void *rx_queue_namespace(struct kobject *kobj) 97082ef3d5dSWeilong Chen { 97182ef3d5dSWeilong Chen struct netdev_rx_queue *queue = to_rx_queue(kobj); 97282ef3d5dSWeilong Chen struct device *dev = &queue->dev->dev; 97382ef3d5dSWeilong Chen const void *ns = NULL; 97482ef3d5dSWeilong Chen 97582ef3d5dSWeilong Chen if (dev->class && dev->class->ns_type) 97682ef3d5dSWeilong Chen ns = dev->class->namespace(dev); 97782ef3d5dSWeilong Chen 97882ef3d5dSWeilong Chen return ns; 97982ef3d5dSWeilong Chen } 98082ef3d5dSWeilong Chen 981b0e37c0dSDmitry Torokhov static void rx_queue_get_ownership(struct kobject *kobj, 982b0e37c0dSDmitry Torokhov kuid_t *uid, kgid_t *gid) 983b0e37c0dSDmitry Torokhov { 984b0e37c0dSDmitry Torokhov const struct net *net = rx_queue_namespace(kobj); 985b0e37c0dSDmitry Torokhov 986b0e37c0dSDmitry Torokhov net_ns_get_ownership(net, uid, gid); 987b0e37c0dSDmitry Torokhov } 988b0e37c0dSDmitry Torokhov 989667e427bSstephen hemminger static struct kobj_type rx_queue_ktype __ro_after_init = { 9900a9627f2STom Herbert .sysfs_ops = &rx_queue_sysfs_ops, 9910a9627f2STom Herbert .release = rx_queue_release, 992be0d6926SKimberly Brown .default_groups = rx_queue_default_groups, 993b0e37c0dSDmitry Torokhov .namespace = rx_queue_namespace, 994b0e37c0dSDmitry Torokhov .get_ownership = rx_queue_get_ownership, 9950a9627f2STom Herbert }; 9960a9627f2STom Herbert 9976b53dafeSWANG Cong static int rx_queue_add_kobject(struct net_device *dev, int index) 9980a9627f2STom Herbert { 9996b53dafeSWANG Cong struct netdev_rx_queue *queue = dev->_rx + index; 10000a9627f2STom Herbert struct kobject *kobj = &queue->kobj; 10010a9627f2STom Herbert int error = 0; 10020a9627f2STom Herbert 1003ddd9b5e3SJouni Hogander /* Kobject_put later will trigger rx_queue_release call which 1004ddd9b5e3SJouni Hogander * decreases dev refcount: Take that reference here 
1005ddd9b5e3SJouni Hogander */ 1006ddd9b5e3SJouni Hogander dev_hold(queue->dev); 1007ddd9b5e3SJouni Hogander 10086b53dafeSWANG Cong kobj->kset = dev->queues_kset; 10090a9627f2STom Herbert error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, 10100a9627f2STom Herbert "rx-%u", index); 1011a953be53SMichael Dalton if (error) 1012b8eb7183SJouni Hogander goto err; 1013a953be53SMichael Dalton 10146b53dafeSWANG Cong if (dev->sysfs_rx_queue_group) { 10156b53dafeSWANG Cong error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); 1016b8eb7183SJouni Hogander if (error) 1017b8eb7183SJouni Hogander goto err; 10180a9627f2STom Herbert } 10190a9627f2STom Herbert 10200a9627f2STom Herbert kobject_uevent(kobj, KOBJ_ADD); 10210a9627f2STom Herbert 10220a9627f2STom Herbert return error; 1023b8eb7183SJouni Hogander 1024b8eb7183SJouni Hogander err: 1025b8eb7183SJouni Hogander kobject_put(kobj); 1026b8eb7183SJouni Hogander return error; 10270a9627f2STom Herbert } 1028d755407dSChristian Brauner 1029d755407dSChristian Brauner static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid, 1030d755407dSChristian Brauner kgid_t kgid) 1031d755407dSChristian Brauner { 1032d755407dSChristian Brauner struct netdev_rx_queue *queue = dev->_rx + index; 1033d755407dSChristian Brauner struct kobject *kobj = &queue->kobj; 1034d755407dSChristian Brauner int error; 1035d755407dSChristian Brauner 1036d755407dSChristian Brauner error = sysfs_change_owner(kobj, kuid, kgid); 1037d755407dSChristian Brauner if (error) 1038d755407dSChristian Brauner return error; 1039d755407dSChristian Brauner 1040d755407dSChristian Brauner if (dev->sysfs_rx_queue_group) 1041d755407dSChristian Brauner error = sysfs_group_change_owner( 1042d755407dSChristian Brauner kobj, dev->sysfs_rx_queue_group, kuid, kgid); 1043d755407dSChristian Brauner 1044d755407dSChristian Brauner return error; 1045d755407dSChristian Brauner } 104680dd6eacSPaul Bolle #endif /* CONFIG_SYSFS */ 10470a9627f2STom Herbert 
104862fe0b40SBen Hutchings int 10496b53dafeSWANG Cong net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) 10500a9627f2STom Herbert { 1051a953be53SMichael Dalton #ifdef CONFIG_SYSFS 10520a9627f2STom Herbert int i; 10530a9627f2STom Herbert int error = 0; 10540a9627f2STom Herbert 1055a953be53SMichael Dalton #ifndef CONFIG_RPS 10566b53dafeSWANG Cong if (!dev->sysfs_rx_queue_group) 1057a953be53SMichael Dalton return 0; 1058a953be53SMichael Dalton #endif 105962fe0b40SBen Hutchings for (i = old_num; i < new_num; i++) { 10606b53dafeSWANG Cong error = rx_queue_add_kobject(dev, i); 106162fe0b40SBen Hutchings if (error) { 106262fe0b40SBen Hutchings new_num = old_num; 10630a9627f2STom Herbert break; 10640a9627f2STom Herbert } 106562fe0b40SBen Hutchings } 10660a9627f2STom Herbert 1067a953be53SMichael Dalton while (--i >= new_num) { 1068002d8a1aSAndrey Vagin struct kobject *kobj = &dev->_rx[i].kobj; 1069002d8a1aSAndrey Vagin 10708b8f3e66SChristian Brauner if (!refcount_read(&dev_net(dev)->ns.count)) 1071002d8a1aSAndrey Vagin kobj->uevent_suppress = 1; 10726b53dafeSWANG Cong if (dev->sysfs_rx_queue_group) 1073002d8a1aSAndrey Vagin sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); 1074002d8a1aSAndrey Vagin kobject_put(kobj); 1075a953be53SMichael Dalton } 10760a9627f2STom Herbert 10770a9627f2STom Herbert return error; 1078bf264145STom Herbert #else 1079bf264145STom Herbert return 0; 1080bf264145STom Herbert #endif 10810a9627f2STom Herbert } 10820a9627f2STom Herbert 1083d755407dSChristian Brauner static int net_rx_queue_change_owner(struct net_device *dev, int num, 1084d755407dSChristian Brauner kuid_t kuid, kgid_t kgid) 1085d755407dSChristian Brauner { 1086d755407dSChristian Brauner #ifdef CONFIG_SYSFS 1087d755407dSChristian Brauner int error = 0; 1088d755407dSChristian Brauner int i; 1089d755407dSChristian Brauner 1090d755407dSChristian Brauner #ifndef CONFIG_RPS 1091d755407dSChristian Brauner if (!dev->sysfs_rx_queue_group) 1092d755407dSChristian 
Brauner return 0; 1093d755407dSChristian Brauner #endif 1094d755407dSChristian Brauner for (i = 0; i < num; i++) { 1095d755407dSChristian Brauner error = rx_queue_change_owner(dev, i, kuid, kgid); 1096d755407dSChristian Brauner if (error) 1097d755407dSChristian Brauner break; 1098d755407dSChristian Brauner } 1099d755407dSChristian Brauner 1100d755407dSChristian Brauner return error; 1101d755407dSChristian Brauner #else 1102d755407dSChristian Brauner return 0; 1103d755407dSChristian Brauner #endif 1104d755407dSChristian Brauner } 1105d755407dSChristian Brauner 1106ccf5ff69Sdavid decotigny #ifdef CONFIG_SYSFS 11071d24eb48STom Herbert /* 11081d24eb48STom Herbert * netdev_queue sysfs structures and functions. 11091d24eb48STom Herbert */ 11101d24eb48STom Herbert struct netdev_queue_attribute { 11111d24eb48STom Herbert struct attribute attr; 1112718ad681Sstephen hemminger ssize_t (*show)(struct netdev_queue *queue, char *buf); 11131d24eb48STom Herbert ssize_t (*store)(struct netdev_queue *queue, 1114718ad681Sstephen hemminger const char *buf, size_t len); 11151d24eb48STom Herbert }; 11166648c65eSstephen hemminger #define to_netdev_queue_attr(_attr) \ 11176648c65eSstephen hemminger container_of(_attr, struct netdev_queue_attribute, attr) 11181d24eb48STom Herbert 11191d24eb48STom Herbert #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) 11201d24eb48STom Herbert 11211d24eb48STom Herbert static ssize_t netdev_queue_attr_show(struct kobject *kobj, 11221d24eb48STom Herbert struct attribute *attr, char *buf) 112362fe0b40SBen Hutchings { 1124667e427bSstephen hemminger const struct netdev_queue_attribute *attribute 1125667e427bSstephen hemminger = to_netdev_queue_attr(attr); 11261d24eb48STom Herbert struct netdev_queue *queue = to_netdev_queue(kobj); 11271d24eb48STom Herbert 11281d24eb48STom Herbert if (!attribute->show) 11291d24eb48STom Herbert return -EIO; 11301d24eb48STom Herbert 1131718ad681Sstephen hemminger return attribute->show(queue, buf); 
11321d24eb48STom Herbert } 11331d24eb48STom Herbert 11341d24eb48STom Herbert static ssize_t netdev_queue_attr_store(struct kobject *kobj, 11351d24eb48STom Herbert struct attribute *attr, 11361d24eb48STom Herbert const char *buf, size_t count) 11371d24eb48STom Herbert { 1138667e427bSstephen hemminger const struct netdev_queue_attribute *attribute 1139667e427bSstephen hemminger = to_netdev_queue_attr(attr); 11401d24eb48STom Herbert struct netdev_queue *queue = to_netdev_queue(kobj); 11411d24eb48STom Herbert 11421d24eb48STom Herbert if (!attribute->store) 11431d24eb48STom Herbert return -EIO; 11441d24eb48STom Herbert 1145718ad681Sstephen hemminger return attribute->store(queue, buf, count); 11461d24eb48STom Herbert } 11471d24eb48STom Herbert 11481d24eb48STom Herbert static const struct sysfs_ops netdev_queue_sysfs_ops = { 11491d24eb48STom Herbert .show = netdev_queue_attr_show, 11501d24eb48STom Herbert .store = netdev_queue_attr_store, 11511d24eb48STom Herbert }; 11521d24eb48STom Herbert 11532b9c7581Sstephen hemminger static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) 1154ccf5ff69Sdavid decotigny { 1155ccf5ff69Sdavid decotigny unsigned long trans_timeout; 1156ccf5ff69Sdavid decotigny 1157ccf5ff69Sdavid decotigny spin_lock_irq(&queue->_xmit_lock); 1158ccf5ff69Sdavid decotigny trans_timeout = queue->trans_timeout; 1159ccf5ff69Sdavid decotigny spin_unlock_irq(&queue->_xmit_lock); 1160ccf5ff69Sdavid decotigny 11619bb5fbeaSXiongfeng Wang return sprintf(buf, fmt_ulong, trans_timeout); 1162ccf5ff69Sdavid decotigny } 1163ccf5ff69Sdavid decotigny 1164c4047f53SThadeu Lima de Souza Cascardo static unsigned int get_netdev_queue_index(struct netdev_queue *queue) 1165822b3b2eSJohn Fastabend { 1166822b3b2eSJohn Fastabend struct net_device *dev = queue->dev; 1167c4047f53SThadeu Lima de Souza Cascardo unsigned int i; 1168822b3b2eSJohn Fastabend 1169c4047f53SThadeu Lima de Souza Cascardo i = queue - dev->_tx; 1170822b3b2eSJohn Fastabend BUG_ON(i >= 
dev->num_tx_queues); 1171822b3b2eSJohn Fastabend 1172822b3b2eSJohn Fastabend return i; 1173822b3b2eSJohn Fastabend } 1174822b3b2eSJohn Fastabend 11752b9c7581Sstephen hemminger static ssize_t traffic_class_show(struct netdev_queue *queue, 11768d059b0fSAlexander Duyck char *buf) 11778d059b0fSAlexander Duyck { 11788d059b0fSAlexander Duyck struct net_device *dev = queue->dev; 1179b2f17564SAlexander Duyck int num_tc, tc; 1180d7be9775SAlexander Duyck int index; 11818d059b0fSAlexander Duyck 1182d7be9775SAlexander Duyck if (!netif_is_multiqueue(dev)) 1183d7be9775SAlexander Duyck return -ENOENT; 1184d7be9775SAlexander Duyck 1185b2f17564SAlexander Duyck if (!rtnl_trylock()) 1186b2f17564SAlexander Duyck return restart_syscall(); 1187b2f17564SAlexander Duyck 1188d7be9775SAlexander Duyck index = get_netdev_queue_index(queue); 1189ffcfe25bSAlexander Duyck 1190ffcfe25bSAlexander Duyck /* If queue belongs to subordinate dev use its TC mapping */ 1191ffcfe25bSAlexander Duyck dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 1192ffcfe25bSAlexander Duyck 1193b2f17564SAlexander Duyck num_tc = dev->num_tc; 1194d7be9775SAlexander Duyck tc = netdev_txq_to_tc(dev, index); 1195b2f17564SAlexander Duyck 1196b2f17564SAlexander Duyck rtnl_unlock(); 1197b2f17564SAlexander Duyck 11988d059b0fSAlexander Duyck if (tc < 0) 11998d059b0fSAlexander Duyck return -EINVAL; 12008d059b0fSAlexander Duyck 1201ffcfe25bSAlexander Duyck /* We can report the traffic class one of two ways: 1202ffcfe25bSAlexander Duyck * Subordinate device traffic classes are reported with the traffic 1203ffcfe25bSAlexander Duyck * class first, and then the subordinate class so for example TC0 on 1204ffcfe25bSAlexander Duyck * subordinate device 2 will be reported as "0-2". If the queue 1205ffcfe25bSAlexander Duyck * belongs to the root device it will be reported with just the 1206ffcfe25bSAlexander Duyck * traffic class, so just "0" for TC 0 for example. 
1207ffcfe25bSAlexander Duyck */ 1208b2f17564SAlexander Duyck return num_tc < 0 ? sprintf(buf, "%d%d\n", tc, num_tc) : 1209000fe268SYe Bin sprintf(buf, "%d\n", tc); 12108d059b0fSAlexander Duyck } 12118d059b0fSAlexander Duyck 12128d059b0fSAlexander Duyck #ifdef CONFIG_XPS 12132b9c7581Sstephen hemminger static ssize_t tx_maxrate_show(struct netdev_queue *queue, 1214822b3b2eSJohn Fastabend char *buf) 1215822b3b2eSJohn Fastabend { 1216822b3b2eSJohn Fastabend return sprintf(buf, "%lu\n", queue->tx_maxrate); 1217822b3b2eSJohn Fastabend } 1218822b3b2eSJohn Fastabend 12192b9c7581Sstephen hemminger static ssize_t tx_maxrate_store(struct netdev_queue *queue, 1220822b3b2eSJohn Fastabend const char *buf, size_t len) 1221822b3b2eSJohn Fastabend { 1222822b3b2eSJohn Fastabend struct net_device *dev = queue->dev; 1223822b3b2eSJohn Fastabend int err, index = get_netdev_queue_index(queue); 1224822b3b2eSJohn Fastabend u32 rate = 0; 1225822b3b2eSJohn Fastabend 12263033fcedSTyler Hicks if (!capable(CAP_NET_ADMIN)) 12273033fcedSTyler Hicks return -EPERM; 12283033fcedSTyler Hicks 1229822b3b2eSJohn Fastabend err = kstrtou32(buf, 10, &rate); 1230822b3b2eSJohn Fastabend if (err < 0) 1231822b3b2eSJohn Fastabend return err; 1232822b3b2eSJohn Fastabend 1233822b3b2eSJohn Fastabend if (!rtnl_trylock()) 1234822b3b2eSJohn Fastabend return restart_syscall(); 1235822b3b2eSJohn Fastabend 1236822b3b2eSJohn Fastabend err = -EOPNOTSUPP; 1237822b3b2eSJohn Fastabend if (dev->netdev_ops->ndo_set_tx_maxrate) 1238822b3b2eSJohn Fastabend err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); 1239822b3b2eSJohn Fastabend 1240822b3b2eSJohn Fastabend rtnl_unlock(); 1241822b3b2eSJohn Fastabend if (!err) { 1242822b3b2eSJohn Fastabend queue->tx_maxrate = rate; 1243822b3b2eSJohn Fastabend return len; 1244822b3b2eSJohn Fastabend } 1245822b3b2eSJohn Fastabend return err; 1246822b3b2eSJohn Fastabend } 1247822b3b2eSJohn Fastabend 12482b9c7581Sstephen hemminger static struct netdev_queue_attribute queue_tx_maxrate 
__ro_after_init 12492b9c7581Sstephen hemminger = __ATTR_RW(tx_maxrate); 1250822b3b2eSJohn Fastabend #endif 1251822b3b2eSJohn Fastabend 12522b9c7581Sstephen hemminger static struct netdev_queue_attribute queue_trans_timeout __ro_after_init 12532b9c7581Sstephen hemminger = __ATTR_RO(tx_timeout); 1254ccf5ff69Sdavid decotigny 12552b9c7581Sstephen hemminger static struct netdev_queue_attribute queue_traffic_class __ro_after_init 12562b9c7581Sstephen hemminger = __ATTR_RO(traffic_class); 12578d059b0fSAlexander Duyck 1258114cf580STom Herbert #ifdef CONFIG_BQL 1259114cf580STom Herbert /* 1260114cf580STom Herbert * Byte queue limits sysfs structures and functions. 1261114cf580STom Herbert */ 1262114cf580STom Herbert static ssize_t bql_show(char *buf, unsigned int value) 1263114cf580STom Herbert { 1264114cf580STom Herbert return sprintf(buf, "%u\n", value); 1265114cf580STom Herbert } 1266114cf580STom Herbert 1267114cf580STom Herbert static ssize_t bql_set(const char *buf, const size_t count, 1268114cf580STom Herbert unsigned int *pvalue) 1269114cf580STom Herbert { 1270114cf580STom Herbert unsigned int value; 1271114cf580STom Herbert int err; 1272114cf580STom Herbert 12736648c65eSstephen hemminger if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { 1274114cf580STom Herbert value = DQL_MAX_LIMIT; 12756648c65eSstephen hemminger } else { 1276114cf580STom Herbert err = kstrtouint(buf, 10, &value); 1277114cf580STom Herbert if (err < 0) 1278114cf580STom Herbert return err; 1279114cf580STom Herbert if (value > DQL_MAX_LIMIT) 1280114cf580STom Herbert return -EINVAL; 1281114cf580STom Herbert } 1282114cf580STom Herbert 1283114cf580STom Herbert *pvalue = value; 1284114cf580STom Herbert 1285114cf580STom Herbert return count; 1286114cf580STom Herbert } 1287114cf580STom Herbert 1288114cf580STom Herbert static ssize_t bql_show_hold_time(struct netdev_queue *queue, 1289114cf580STom Herbert char *buf) 1290114cf580STom Herbert { 1291114cf580STom Herbert struct dql *dql = &queue->dql; 
1292114cf580STom Herbert 1293114cf580STom Herbert return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); 1294114cf580STom Herbert } 1295114cf580STom Herbert 1296114cf580STom Herbert static ssize_t bql_set_hold_time(struct netdev_queue *queue, 1297114cf580STom Herbert const char *buf, size_t len) 1298114cf580STom Herbert { 1299114cf580STom Herbert struct dql *dql = &queue->dql; 130095c96174SEric Dumazet unsigned int value; 1301114cf580STom Herbert int err; 1302114cf580STom Herbert 1303114cf580STom Herbert err = kstrtouint(buf, 10, &value); 1304114cf580STom Herbert if (err < 0) 1305114cf580STom Herbert return err; 1306114cf580STom Herbert 1307114cf580STom Herbert dql->slack_hold_time = msecs_to_jiffies(value); 1308114cf580STom Herbert 1309114cf580STom Herbert return len; 1310114cf580STom Herbert } 1311114cf580STom Herbert 1312170c658aSstephen hemminger static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init 1313d6444062SJoe Perches = __ATTR(hold_time, 0644, 1314170c658aSstephen hemminger bql_show_hold_time, bql_set_hold_time); 1315114cf580STom Herbert 1316114cf580STom Herbert static ssize_t bql_show_inflight(struct netdev_queue *queue, 1317114cf580STom Herbert char *buf) 1318114cf580STom Herbert { 1319114cf580STom Herbert struct dql *dql = &queue->dql; 1320114cf580STom Herbert 1321114cf580STom Herbert return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); 1322114cf580STom Herbert } 1323114cf580STom Herbert 1324170c658aSstephen hemminger static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = 1325d6444062SJoe Perches __ATTR(inflight, 0444, bql_show_inflight, NULL); 1326114cf580STom Herbert 1327114cf580STom Herbert #define BQL_ATTR(NAME, FIELD) \ 1328114cf580STom Herbert static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ 1329114cf580STom Herbert char *buf) \ 1330114cf580STom Herbert { \ 1331114cf580STom Herbert return bql_show(buf, queue->dql.FIELD); \ 1332114cf580STom Herbert } \ 
1333114cf580STom Herbert \ 1334114cf580STom Herbert static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ 1335114cf580STom Herbert const char *buf, size_t len) \ 1336114cf580STom Herbert { \ 1337114cf580STom Herbert return bql_set(buf, len, &queue->dql.FIELD); \ 1338114cf580STom Herbert } \ 1339114cf580STom Herbert \ 1340170c658aSstephen hemminger static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \ 1341d6444062SJoe Perches = __ATTR(NAME, 0644, \ 1342170c658aSstephen hemminger bql_show_ ## NAME, bql_set_ ## NAME) 1343114cf580STom Herbert 1344170c658aSstephen hemminger BQL_ATTR(limit, limit); 1345170c658aSstephen hemminger BQL_ATTR(limit_max, max_limit); 1346170c658aSstephen hemminger BQL_ATTR(limit_min, min_limit); 1347114cf580STom Herbert 1348170c658aSstephen hemminger static struct attribute *dql_attrs[] __ro_after_init = { 1349114cf580STom Herbert &bql_limit_attribute.attr, 1350114cf580STom Herbert &bql_limit_max_attribute.attr, 1351114cf580STom Herbert &bql_limit_min_attribute.attr, 1352114cf580STom Herbert &bql_hold_time_attribute.attr, 1353114cf580STom Herbert &bql_inflight_attribute.attr, 1354114cf580STom Herbert NULL 1355114cf580STom Herbert }; 1356114cf580STom Herbert 135738ef00ccSArvind Yadav static const struct attribute_group dql_group = { 1358114cf580STom Herbert .name = "byte_queue_limits", 1359114cf580STom Herbert .attrs = dql_attrs, 1360114cf580STom Herbert }; 1361114cf580STom Herbert #endif /* CONFIG_BQL */ 1362114cf580STom Herbert 1363ccf5ff69Sdavid decotigny #ifdef CONFIG_XPS 13642b9c7581Sstephen hemminger static ssize_t xps_cpus_show(struct netdev_queue *queue, 1365718ad681Sstephen hemminger char *buf) 13661d24eb48STom Herbert { 1367fb250385SAntoine Tenart int cpu, len, ret, num_tc = 1, tc = 0; 13681d24eb48STom Herbert struct net_device *dev = queue->dev; 13691d24eb48STom Herbert struct xps_dev_maps *dev_maps; 1370*ea4fe7e8SAntoine Tenart unsigned long *mask, index; 13711d24eb48STom Herbert 
1372d7be9775SAlexander Duyck if (!netif_is_multiqueue(dev)) 1373d7be9775SAlexander Duyck return -ENOENT; 1374d7be9775SAlexander Duyck 13751d24eb48STom Herbert index = get_netdev_queue_index(queue); 13761d24eb48STom Herbert 1377fb250385SAntoine Tenart if (!rtnl_trylock()) 1378fb250385SAntoine Tenart return restart_syscall(); 1379fb250385SAntoine Tenart 1380184c449fSAlexander Duyck if (dev->num_tc) { 1381ffcfe25bSAlexander Duyck /* Do not allow XPS on subordinate device directly */ 1382184c449fSAlexander Duyck num_tc = dev->num_tc; 1383fb250385SAntoine Tenart if (num_tc < 0) { 1384fb250385SAntoine Tenart ret = -EINVAL; 1385fb250385SAntoine Tenart goto err_rtnl_unlock; 1386fb250385SAntoine Tenart } 1387ffcfe25bSAlexander Duyck 1388ffcfe25bSAlexander Duyck /* If queue belongs to subordinate dev use its map */ 1389ffcfe25bSAlexander Duyck dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 1390ffcfe25bSAlexander Duyck 1391184c449fSAlexander Duyck tc = netdev_txq_to_tc(dev, index); 1392fb250385SAntoine Tenart if (tc < 0) { 1393fb250385SAntoine Tenart ret = -EINVAL; 1394fb250385SAntoine Tenart goto err_rtnl_unlock; 1395fb250385SAntoine Tenart } 1396184c449fSAlexander Duyck } 1397184c449fSAlexander Duyck 1398*ea4fe7e8SAntoine Tenart mask = bitmap_zalloc(nr_cpu_ids, GFP_KERNEL); 1399*ea4fe7e8SAntoine Tenart if (!mask) { 1400fb250385SAntoine Tenart ret = -ENOMEM; 1401fb250385SAntoine Tenart goto err_rtnl_unlock; 1402fb250385SAntoine Tenart } 1403664088f8SAlexander Duyck 14041d24eb48STom Herbert rcu_read_lock(); 140580d19669SAmritha Nambiar dev_maps = rcu_dereference(dev->xps_cpus_map); 14061d24eb48STom Herbert if (dev_maps) { 1407184c449fSAlexander Duyck for_each_possible_cpu(cpu) { 1408184c449fSAlexander Duyck int i, tci = cpu * num_tc + tc; 1409184c449fSAlexander Duyck struct xps_map *map; 1410184c449fSAlexander Duyck 141180d19669SAmritha Nambiar map = rcu_dereference(dev_maps->attr_map[tci]); 1412184c449fSAlexander Duyck if (!map) 1413184c449fSAlexander Duyck continue; 
1414184c449fSAlexander Duyck 1415184c449fSAlexander Duyck for (i = map->len; i--;) { 1416184c449fSAlexander Duyck if (map->queues[i] == index) { 1417*ea4fe7e8SAntoine Tenart set_bit(cpu, mask); 14181d24eb48STom Herbert break; 14191d24eb48STom Herbert } 14201d24eb48STom Herbert } 14211d24eb48STom Herbert } 14221d24eb48STom Herbert } 14231d24eb48STom Herbert rcu_read_unlock(); 14241d24eb48STom Herbert 1425fb250385SAntoine Tenart rtnl_unlock(); 1426fb250385SAntoine Tenart 1427*ea4fe7e8SAntoine Tenart len = bitmap_print_to_pagebuf(false, buf, mask, nr_cpu_ids); 1428*ea4fe7e8SAntoine Tenart bitmap_free(mask); 1429f0906827STejun Heo return len < PAGE_SIZE ? len : -EINVAL; 1430fb250385SAntoine Tenart 1431fb250385SAntoine Tenart err_rtnl_unlock: 1432fb250385SAntoine Tenart rtnl_unlock(); 1433fb250385SAntoine Tenart return ret; 14341d24eb48STom Herbert } 14351d24eb48STom Herbert 14362b9c7581Sstephen hemminger static ssize_t xps_cpus_store(struct netdev_queue *queue, 14371d24eb48STom Herbert const char *buf, size_t len) 14381d24eb48STom Herbert { 14391d24eb48STom Herbert struct net_device *dev = queue->dev; 14401d24eb48STom Herbert unsigned long index; 1441537c00deSAlexander Duyck cpumask_var_t mask; 1442537c00deSAlexander Duyck int err; 14431d24eb48STom Herbert 1444d7be9775SAlexander Duyck if (!netif_is_multiqueue(dev)) 1445d7be9775SAlexander Duyck return -ENOENT; 1446d7be9775SAlexander Duyck 14471d24eb48STom Herbert if (!capable(CAP_NET_ADMIN)) 14481d24eb48STom Herbert return -EPERM; 14491d24eb48STom Herbert 14501d24eb48STom Herbert if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 14511d24eb48STom Herbert return -ENOMEM; 14521d24eb48STom Herbert 14531d24eb48STom Herbert index = get_netdev_queue_index(queue); 14541d24eb48STom Herbert 14551d24eb48STom Herbert err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); 14561d24eb48STom Herbert if (err) { 14571d24eb48STom Herbert free_cpumask_var(mask); 14581d24eb48STom Herbert return err; 14591d24eb48STom Herbert } 
14601d24eb48STom Herbert 14611ad58225SAntoine Tenart if (!rtnl_trylock()) { 14621ad58225SAntoine Tenart free_cpumask_var(mask); 14631ad58225SAntoine Tenart return restart_syscall(); 14641ad58225SAntoine Tenart } 14651ad58225SAntoine Tenart 1466537c00deSAlexander Duyck err = netif_set_xps_queue(dev, mask, index); 14671ad58225SAntoine Tenart rtnl_unlock(); 14681d24eb48STom Herbert 14691d24eb48STom Herbert free_cpumask_var(mask); 14701d24eb48STom Herbert 1471537c00deSAlexander Duyck return err ? : len; 14721d24eb48STom Herbert } 14731d24eb48STom Herbert 14742b9c7581Sstephen hemminger static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init 14752b9c7581Sstephen hemminger = __ATTR_RW(xps_cpus); 14768af2c06fSAmritha Nambiar 14778af2c06fSAmritha Nambiar static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) 14788af2c06fSAmritha Nambiar { 14794ae2bb81SAntoine Tenart int j, len, ret, num_tc = 1, tc = 0; 14808af2c06fSAmritha Nambiar struct net_device *dev = queue->dev; 14818af2c06fSAmritha Nambiar struct xps_dev_maps *dev_maps; 14828af2c06fSAmritha Nambiar unsigned long *mask, index; 14838af2c06fSAmritha Nambiar 14848af2c06fSAmritha Nambiar index = get_netdev_queue_index(queue); 14858af2c06fSAmritha Nambiar 14864ae2bb81SAntoine Tenart if (!rtnl_trylock()) 14874ae2bb81SAntoine Tenart return restart_syscall(); 14884ae2bb81SAntoine Tenart 14898af2c06fSAmritha Nambiar if (dev->num_tc) { 14908af2c06fSAmritha Nambiar num_tc = dev->num_tc; 14918af2c06fSAmritha Nambiar tc = netdev_txq_to_tc(dev, index); 14924ae2bb81SAntoine Tenart if (tc < 0) { 14934ae2bb81SAntoine Tenart ret = -EINVAL; 14944ae2bb81SAntoine Tenart goto err_rtnl_unlock; 14954ae2bb81SAntoine Tenart } 14968af2c06fSAmritha Nambiar } 149729ca1c5aSAndy Shevchenko mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); 14984ae2bb81SAntoine Tenart if (!mask) { 14994ae2bb81SAntoine Tenart ret = -ENOMEM; 15004ae2bb81SAntoine Tenart goto err_rtnl_unlock; 15014ae2bb81SAntoine Tenart } 
15028af2c06fSAmritha Nambiar 15038af2c06fSAmritha Nambiar rcu_read_lock(); 15048af2c06fSAmritha Nambiar dev_maps = rcu_dereference(dev->xps_rxqs_map); 15058af2c06fSAmritha Nambiar if (!dev_maps) 15068af2c06fSAmritha Nambiar goto out_no_maps; 15078af2c06fSAmritha Nambiar 15088af2c06fSAmritha Nambiar for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues), 15098af2c06fSAmritha Nambiar j < dev->num_rx_queues;) { 15108af2c06fSAmritha Nambiar int i, tci = j * num_tc + tc; 15118af2c06fSAmritha Nambiar struct xps_map *map; 15128af2c06fSAmritha Nambiar 15138af2c06fSAmritha Nambiar map = rcu_dereference(dev_maps->attr_map[tci]); 15148af2c06fSAmritha Nambiar if (!map) 15158af2c06fSAmritha Nambiar continue; 15168af2c06fSAmritha Nambiar 15178af2c06fSAmritha Nambiar for (i = map->len; i--;) { 15188af2c06fSAmritha Nambiar if (map->queues[i] == index) { 15198af2c06fSAmritha Nambiar set_bit(j, mask); 15208af2c06fSAmritha Nambiar break; 15218af2c06fSAmritha Nambiar } 15228af2c06fSAmritha Nambiar } 15238af2c06fSAmritha Nambiar } 15248af2c06fSAmritha Nambiar out_no_maps: 15258af2c06fSAmritha Nambiar rcu_read_unlock(); 15268af2c06fSAmritha Nambiar 15274ae2bb81SAntoine Tenart rtnl_unlock(); 15284ae2bb81SAntoine Tenart 15298af2c06fSAmritha Nambiar len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues); 153029ca1c5aSAndy Shevchenko bitmap_free(mask); 15318af2c06fSAmritha Nambiar 15328af2c06fSAmritha Nambiar return len < PAGE_SIZE ? 
len : -EINVAL; 15334ae2bb81SAntoine Tenart 15344ae2bb81SAntoine Tenart err_rtnl_unlock: 15354ae2bb81SAntoine Tenart rtnl_unlock(); 15364ae2bb81SAntoine Tenart return ret; 15378af2c06fSAmritha Nambiar } 15388af2c06fSAmritha Nambiar 15398af2c06fSAmritha Nambiar static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, 15408af2c06fSAmritha Nambiar size_t len) 15418af2c06fSAmritha Nambiar { 15428af2c06fSAmritha Nambiar struct net_device *dev = queue->dev; 15438af2c06fSAmritha Nambiar struct net *net = dev_net(dev); 15448af2c06fSAmritha Nambiar unsigned long *mask, index; 15458af2c06fSAmritha Nambiar int err; 15468af2c06fSAmritha Nambiar 15478af2c06fSAmritha Nambiar if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 15488af2c06fSAmritha Nambiar return -EPERM; 15498af2c06fSAmritha Nambiar 155029ca1c5aSAndy Shevchenko mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); 15518af2c06fSAmritha Nambiar if (!mask) 15528af2c06fSAmritha Nambiar return -ENOMEM; 15538af2c06fSAmritha Nambiar 15548af2c06fSAmritha Nambiar index = get_netdev_queue_index(queue); 15558af2c06fSAmritha Nambiar 15568af2c06fSAmritha Nambiar err = bitmap_parse(buf, len, mask, dev->num_rx_queues); 15578af2c06fSAmritha Nambiar if (err) { 155829ca1c5aSAndy Shevchenko bitmap_free(mask); 15598af2c06fSAmritha Nambiar return err; 15608af2c06fSAmritha Nambiar } 15618af2c06fSAmritha Nambiar 15622d57b4f1SAntoine Tenart if (!rtnl_trylock()) { 15632d57b4f1SAntoine Tenart bitmap_free(mask); 15642d57b4f1SAntoine Tenart return restart_syscall(); 15652d57b4f1SAntoine Tenart } 15662d57b4f1SAntoine Tenart 15674d99f660SAndrei Vagin cpus_read_lock(); 15688af2c06fSAmritha Nambiar err = __netif_set_xps_queue(dev, mask, index, true); 15694d99f660SAndrei Vagin cpus_read_unlock(); 15704d99f660SAndrei Vagin 15712d57b4f1SAntoine Tenart rtnl_unlock(); 15722d57b4f1SAntoine Tenart 157329ca1c5aSAndy Shevchenko bitmap_free(mask); 15748af2c06fSAmritha Nambiar return err ? 
: len; 15758af2c06fSAmritha Nambiar } 15768af2c06fSAmritha Nambiar 15778af2c06fSAmritha Nambiar static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init 15788af2c06fSAmritha Nambiar = __ATTR_RW(xps_rxqs); 1579ccf5ff69Sdavid decotigny #endif /* CONFIG_XPS */ 15801d24eb48STom Herbert 15812b9c7581Sstephen hemminger static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { 1582ccf5ff69Sdavid decotigny &queue_trans_timeout.attr, 15838d059b0fSAlexander Duyck &queue_traffic_class.attr, 1584ccf5ff69Sdavid decotigny #ifdef CONFIG_XPS 15851d24eb48STom Herbert &xps_cpus_attribute.attr, 15868af2c06fSAmritha Nambiar &xps_rxqs_attribute.attr, 1587822b3b2eSJohn Fastabend &queue_tx_maxrate.attr, 1588ccf5ff69Sdavid decotigny #endif 15891d24eb48STom Herbert NULL 15901d24eb48STom Herbert }; 1591be0d6926SKimberly Brown ATTRIBUTE_GROUPS(netdev_queue_default); 15921d24eb48STom Herbert 15931d24eb48STom Herbert static void netdev_queue_release(struct kobject *kobj) 15941d24eb48STom Herbert { 15951d24eb48STom Herbert struct netdev_queue *queue = to_netdev_queue(kobj); 15961d24eb48STom Herbert 15971d24eb48STom Herbert memset(kobj, 0, sizeof(*kobj)); 15981d24eb48STom Herbert dev_put(queue->dev); 15991d24eb48STom Herbert } 16001d24eb48STom Herbert 160182ef3d5dSWeilong Chen static const void *netdev_queue_namespace(struct kobject *kobj) 160282ef3d5dSWeilong Chen { 160382ef3d5dSWeilong Chen struct netdev_queue *queue = to_netdev_queue(kobj); 160482ef3d5dSWeilong Chen struct device *dev = &queue->dev->dev; 160582ef3d5dSWeilong Chen const void *ns = NULL; 160682ef3d5dSWeilong Chen 160782ef3d5dSWeilong Chen if (dev->class && dev->class->ns_type) 160882ef3d5dSWeilong Chen ns = dev->class->namespace(dev); 160982ef3d5dSWeilong Chen 161082ef3d5dSWeilong Chen return ns; 161182ef3d5dSWeilong Chen } 161282ef3d5dSWeilong Chen 1613b0e37c0dSDmitry Torokhov static void netdev_queue_get_ownership(struct kobject *kobj, 1614b0e37c0dSDmitry Torokhov kuid_t *uid, kgid_t *gid) 
1615b0e37c0dSDmitry Torokhov { 1616b0e37c0dSDmitry Torokhov const struct net *net = netdev_queue_namespace(kobj); 1617b0e37c0dSDmitry Torokhov 1618b0e37c0dSDmitry Torokhov net_ns_get_ownership(net, uid, gid); 1619b0e37c0dSDmitry Torokhov } 1620b0e37c0dSDmitry Torokhov 16212b9c7581Sstephen hemminger static struct kobj_type netdev_queue_ktype __ro_after_init = { 16221d24eb48STom Herbert .sysfs_ops = &netdev_queue_sysfs_ops, 16231d24eb48STom Herbert .release = netdev_queue_release, 1624be0d6926SKimberly Brown .default_groups = netdev_queue_default_groups, 162582ef3d5dSWeilong Chen .namespace = netdev_queue_namespace, 1626b0e37c0dSDmitry Torokhov .get_ownership = netdev_queue_get_ownership, 16271d24eb48STom Herbert }; 16281d24eb48STom Herbert 16296b53dafeSWANG Cong static int netdev_queue_add_kobject(struct net_device *dev, int index) 16301d24eb48STom Herbert { 16316b53dafeSWANG Cong struct netdev_queue *queue = dev->_tx + index; 16321d24eb48STom Herbert struct kobject *kobj = &queue->kobj; 16331d24eb48STom Herbert int error = 0; 16341d24eb48STom Herbert 1635e0b60903SJouni Hogander /* Kobject_put later will trigger netdev_queue_release call 1636e0b60903SJouni Hogander * which decreases dev refcount: Take that reference here 1637e0b60903SJouni Hogander */ 1638e0b60903SJouni Hogander dev_hold(queue->dev); 1639e0b60903SJouni Hogander 16406b53dafeSWANG Cong kobj->kset = dev->queues_kset; 16411d24eb48STom Herbert error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, 16421d24eb48STom Herbert "tx-%u", index); 1643114cf580STom Herbert if (error) 1644b8eb7183SJouni Hogander goto err; 1645114cf580STom Herbert 1646114cf580STom Herbert #ifdef CONFIG_BQL 1647114cf580STom Herbert error = sysfs_create_group(kobj, &dql_group); 1648b8eb7183SJouni Hogander if (error) 1649b8eb7183SJouni Hogander goto err; 1650114cf580STom Herbert #endif 16511d24eb48STom Herbert 16521d24eb48STom Herbert kobject_uevent(kobj, KOBJ_ADD); 165348a322b6SEric Dumazet return 0; 16541d24eb48STom Herbert 
1655b8eb7183SJouni Hogander err: 1656b8eb7183SJouni Hogander kobject_put(kobj); 1657b8eb7183SJouni Hogander return error; 16581d24eb48STom Herbert } 1659d755407dSChristian Brauner 1660d755407dSChristian Brauner static int tx_queue_change_owner(struct net_device *ndev, int index, 1661d755407dSChristian Brauner kuid_t kuid, kgid_t kgid) 1662d755407dSChristian Brauner { 1663d755407dSChristian Brauner struct netdev_queue *queue = ndev->_tx + index; 1664d755407dSChristian Brauner struct kobject *kobj = &queue->kobj; 1665d755407dSChristian Brauner int error; 1666d755407dSChristian Brauner 1667d755407dSChristian Brauner error = sysfs_change_owner(kobj, kuid, kgid); 1668d755407dSChristian Brauner if (error) 1669d755407dSChristian Brauner return error; 1670d755407dSChristian Brauner 1671d755407dSChristian Brauner #ifdef CONFIG_BQL 1672d755407dSChristian Brauner error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid); 1673d755407dSChristian Brauner #endif 1674d755407dSChristian Brauner return error; 1675d755407dSChristian Brauner } 1676ccf5ff69Sdavid decotigny #endif /* CONFIG_SYSFS */ 16771d24eb48STom Herbert 16781d24eb48STom Herbert int 16796b53dafeSWANG Cong netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) 16801d24eb48STom Herbert { 1681ccf5ff69Sdavid decotigny #ifdef CONFIG_SYSFS 16821d24eb48STom Herbert int i; 16831d24eb48STom Herbert int error = 0; 16841d24eb48STom Herbert 16851d24eb48STom Herbert for (i = old_num; i < new_num; i++) { 16866b53dafeSWANG Cong error = netdev_queue_add_kobject(dev, i); 16871d24eb48STom Herbert if (error) { 16881d24eb48STom Herbert new_num = old_num; 16891d24eb48STom Herbert break; 16901d24eb48STom Herbert } 16911d24eb48STom Herbert } 16921d24eb48STom Herbert 1693114cf580STom Herbert while (--i >= new_num) { 16946b53dafeSWANG Cong struct netdev_queue *queue = dev->_tx + i; 1695114cf580STom Herbert 16968b8f3e66SChristian Brauner if (!refcount_read(&dev_net(dev)->ns.count)) 1697002d8a1aSAndrey Vagin 
queue->kobj.uevent_suppress = 1; 1698114cf580STom Herbert #ifdef CONFIG_BQL 1699114cf580STom Herbert sysfs_remove_group(&queue->kobj, &dql_group); 1700114cf580STom Herbert #endif 1701114cf580STom Herbert kobject_put(&queue->kobj); 1702114cf580STom Herbert } 17031d24eb48STom Herbert 17041d24eb48STom Herbert return error; 1705bf264145STom Herbert #else 1706bf264145STom Herbert return 0; 1707ccf5ff69Sdavid decotigny #endif /* CONFIG_SYSFS */ 17081d24eb48STom Herbert } 17091d24eb48STom Herbert 1710d755407dSChristian Brauner static int net_tx_queue_change_owner(struct net_device *dev, int num, 1711d755407dSChristian Brauner kuid_t kuid, kgid_t kgid) 1712d755407dSChristian Brauner { 1713d755407dSChristian Brauner #ifdef CONFIG_SYSFS 1714d755407dSChristian Brauner int error = 0; 1715d755407dSChristian Brauner int i; 1716d755407dSChristian Brauner 1717d755407dSChristian Brauner for (i = 0; i < num; i++) { 1718d755407dSChristian Brauner error = tx_queue_change_owner(dev, i, kuid, kgid); 1719d755407dSChristian Brauner if (error) 1720d755407dSChristian Brauner break; 1721d755407dSChristian Brauner } 1722d755407dSChristian Brauner 1723d755407dSChristian Brauner return error; 1724d755407dSChristian Brauner #else 1725d755407dSChristian Brauner return 0; 1726d755407dSChristian Brauner #endif /* CONFIG_SYSFS */ 1727d755407dSChristian Brauner } 1728d755407dSChristian Brauner 17296b53dafeSWANG Cong static int register_queue_kobjects(struct net_device *dev) 17301d24eb48STom Herbert { 1731bf264145STom Herbert int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; 17321d24eb48STom Herbert 1733ccf5ff69Sdavid decotigny #ifdef CONFIG_SYSFS 17346b53dafeSWANG Cong dev->queues_kset = kset_create_and_add("queues", 17356b53dafeSWANG Cong NULL, &dev->dev.kobj); 17366b53dafeSWANG Cong if (!dev->queues_kset) 173762fe0b40SBen Hutchings return -ENOMEM; 17386b53dafeSWANG Cong real_rx = dev->real_num_rx_queues; 1739bf264145STom Herbert #endif 17406b53dafeSWANG Cong real_tx = 
dev->real_num_tx_queues; 1741bf264145STom Herbert 17426b53dafeSWANG Cong error = net_rx_queue_update_kobjects(dev, 0, real_rx); 17431d24eb48STom Herbert if (error) 17441d24eb48STom Herbert goto error; 1745bf264145STom Herbert rxq = real_rx; 17461d24eb48STom Herbert 17476b53dafeSWANG Cong error = netdev_queue_update_kobjects(dev, 0, real_tx); 17481d24eb48STom Herbert if (error) 17491d24eb48STom Herbert goto error; 1750bf264145STom Herbert txq = real_tx; 17511d24eb48STom Herbert 17521d24eb48STom Herbert return 0; 17531d24eb48STom Herbert 17541d24eb48STom Herbert error: 17556b53dafeSWANG Cong netdev_queue_update_kobjects(dev, txq, 0); 17566b53dafeSWANG Cong net_rx_queue_update_kobjects(dev, rxq, 0); 1757895a5e96SYueHaibing #ifdef CONFIG_SYSFS 1758895a5e96SYueHaibing kset_unregister(dev->queues_kset); 1759895a5e96SYueHaibing #endif 17601d24eb48STom Herbert return error; 176162fe0b40SBen Hutchings } 176262fe0b40SBen Hutchings 1763d755407dSChristian Brauner static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid) 1764d755407dSChristian Brauner { 1765d755407dSChristian Brauner int error = 0, real_rx = 0, real_tx = 0; 1766d755407dSChristian Brauner 1767d755407dSChristian Brauner #ifdef CONFIG_SYSFS 1768d755407dSChristian Brauner if (ndev->queues_kset) { 1769d755407dSChristian Brauner error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid); 1770d755407dSChristian Brauner if (error) 1771d755407dSChristian Brauner return error; 1772d755407dSChristian Brauner } 1773d755407dSChristian Brauner real_rx = ndev->real_num_rx_queues; 1774d755407dSChristian Brauner #endif 1775d755407dSChristian Brauner real_tx = ndev->real_num_tx_queues; 1776d755407dSChristian Brauner 1777d755407dSChristian Brauner error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid); 1778d755407dSChristian Brauner if (error) 1779d755407dSChristian Brauner return error; 1780d755407dSChristian Brauner 1781d755407dSChristian Brauner error = net_tx_queue_change_owner(ndev, 
real_tx, kuid, kgid); 1782d755407dSChristian Brauner if (error) 1783d755407dSChristian Brauner return error; 1784d755407dSChristian Brauner 1785d755407dSChristian Brauner return 0; 1786d755407dSChristian Brauner } 1787d755407dSChristian Brauner 17886b53dafeSWANG Cong static void remove_queue_kobjects(struct net_device *dev) 17890a9627f2STom Herbert { 1790bf264145STom Herbert int real_rx = 0, real_tx = 0; 1791bf264145STom Herbert 1792a953be53SMichael Dalton #ifdef CONFIG_SYSFS 17936b53dafeSWANG Cong real_rx = dev->real_num_rx_queues; 1794bf264145STom Herbert #endif 17956b53dafeSWANG Cong real_tx = dev->real_num_tx_queues; 1796bf264145STom Herbert 17976b53dafeSWANG Cong net_rx_queue_update_kobjects(dev, real_rx, 0); 17986b53dafeSWANG Cong netdev_queue_update_kobjects(dev, real_tx, 0); 1799ccf5ff69Sdavid decotigny #ifdef CONFIG_SYSFS 18006b53dafeSWANG Cong kset_unregister(dev->queues_kset); 1801bf264145STom Herbert #endif 18020a9627f2STom Herbert } 1803608b4b95SEric W. Biederman 18047dc5dbc8SEric W. Biederman static bool net_current_may_mount(void) 18057dc5dbc8SEric W. Biederman { 18067dc5dbc8SEric W. Biederman struct net *net = current->nsproxy->net_ns; 18077dc5dbc8SEric W. Biederman 18087dc5dbc8SEric W. Biederman return ns_capable(net->user_ns, CAP_SYS_ADMIN); 18097dc5dbc8SEric W. Biederman } 18107dc5dbc8SEric W. Biederman 1811a685e089SAl Viro static void *net_grab_current_ns(void) 1812608b4b95SEric W. Biederman { 1813a685e089SAl Viro struct net *ns = current->nsproxy->net_ns; 1814a685e089SAl Viro #ifdef CONFIG_NET_NS 1815a685e089SAl Viro if (ns) 1816c122e14dSReshetova, Elena refcount_inc(&ns->passive); 1817a685e089SAl Viro #endif 1818a685e089SAl Viro return ns; 1819608b4b95SEric W. Biederman } 1820608b4b95SEric W. Biederman 1821608b4b95SEric W. Biederman static const void *net_initial_ns(void) 1822608b4b95SEric W. Biederman { 1823608b4b95SEric W. Biederman return &init_net; 1824608b4b95SEric W. Biederman } 1825608b4b95SEric W. Biederman 1826608b4b95SEric W. 
Biederman static const void *net_netlink_ns(struct sock *sk) 1827608b4b95SEric W. Biederman { 1828608b4b95SEric W. Biederman return sock_net(sk); 1829608b4b95SEric W. Biederman } 1830608b4b95SEric W. Biederman 1831737aec57Sstephen hemminger const struct kobj_ns_type_operations net_ns_type_operations = { 1832608b4b95SEric W. Biederman .type = KOBJ_NS_TYPE_NET, 18337dc5dbc8SEric W. Biederman .current_may_mount = net_current_may_mount, 1834a685e089SAl Viro .grab_current_ns = net_grab_current_ns, 1835608b4b95SEric W. Biederman .netlink_ns = net_netlink_ns, 1836608b4b95SEric W. Biederman .initial_ns = net_initial_ns, 1837a685e089SAl Viro .drop_ns = net_drop_ns, 1838608b4b95SEric W. Biederman }; 183904600794SJohannes Berg EXPORT_SYMBOL_GPL(net_ns_type_operations); 1840608b4b95SEric W. Biederman 18417eff2e7aSKay Sievers static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 18421da177e4SLinus Torvalds { 184343cb76d9SGreg Kroah-Hartman struct net_device *dev = to_net_dev(d); 18447eff2e7aSKay Sievers int retval; 18451da177e4SLinus Torvalds 1846312c004dSKay Sievers /* pass interface to uevent. */ 18477eff2e7aSKay Sievers retval = add_uevent_var(env, "INTERFACE=%s", dev->name); 1848bf62456eSEric Rannaud if (retval) 1849bf62456eSEric Rannaud goto exit; 18501da177e4SLinus Torvalds 1851ca2f37dbSJean Tourrilhes /* pass ifindex to uevent. 1852ca2f37dbSJean Tourrilhes * ifindex is useful as it won't change (interface name may change) 18536648c65eSstephen hemminger * and is what RtNetlink uses natively. 18546648c65eSstephen hemminger */ 18557eff2e7aSKay Sievers retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); 1856ca2f37dbSJean Tourrilhes 1857bf62456eSEric Rannaud exit: 1858bf62456eSEric Rannaud return retval; 18591da177e4SLinus Torvalds } 18601da177e4SLinus Torvalds 18611da177e4SLinus Torvalds /* 18621da177e4SLinus Torvalds * netdev_release -- destroy and free a dead device. 
186343cb76d9SGreg Kroah-Hartman * Called when last reference to device kobject is gone. 18641da177e4SLinus Torvalds */ 186543cb76d9SGreg Kroah-Hartman static void netdev_release(struct device *d) 18661da177e4SLinus Torvalds { 186743cb76d9SGreg Kroah-Hartman struct net_device *dev = to_net_dev(d); 18681da177e4SLinus Torvalds 18691da177e4SLinus Torvalds BUG_ON(dev->reg_state != NETREG_RELEASED); 18701da177e4SLinus Torvalds 18716c557001SFlorian Westphal /* no need to wait for rcu grace period: 18726c557001SFlorian Westphal * device is dead and about to be freed. 18736c557001SFlorian Westphal */ 18746c557001SFlorian Westphal kfree(rcu_access_pointer(dev->ifalias)); 187574d332c1SEric Dumazet netdev_freemem(dev); 18761da177e4SLinus Torvalds } 18771da177e4SLinus Torvalds 1878608b4b95SEric W. Biederman static const void *net_namespace(struct device *d) 1879608b4b95SEric W. Biederman { 18805c29482dSGeliang Tang struct net_device *dev = to_net_dev(d); 18815c29482dSGeliang Tang 1882608b4b95SEric W. Biederman return dev_net(dev); 1883608b4b95SEric W. Biederman } 1884608b4b95SEric W. Biederman 1885b0e37c0dSDmitry Torokhov static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid) 1886b0e37c0dSDmitry Torokhov { 1887b0e37c0dSDmitry Torokhov struct net_device *dev = to_net_dev(d); 1888b0e37c0dSDmitry Torokhov const struct net *net = dev_net(dev); 1889b0e37c0dSDmitry Torokhov 1890b0e37c0dSDmitry Torokhov net_ns_get_ownership(net, uid, gid); 1891b0e37c0dSDmitry Torokhov } 1892b0e37c0dSDmitry Torokhov 1893e6d473e6Sstephen hemminger static struct class net_class __ro_after_init = { 18941da177e4SLinus Torvalds .name = "net", 189543cb76d9SGreg Kroah-Hartman .dev_release = netdev_release, 18966be8aeefSGreg Kroah-Hartman .dev_groups = net_class_groups, 189743cb76d9SGreg Kroah-Hartman .dev_uevent = netdev_uevent, 1898608b4b95SEric W. Biederman .ns_type = &net_ns_type_operations, 1899608b4b95SEric W. 
Biederman .namespace = net_namespace, 1900b0e37c0dSDmitry Torokhov .get_ownership = net_get_ownership, 19011da177e4SLinus Torvalds }; 19021da177e4SLinus Torvalds 1903aa836df9SFlorian Fainelli #ifdef CONFIG_OF_NET 1904aa836df9SFlorian Fainelli static int of_dev_node_match(struct device *dev, const void *data) 1905aa836df9SFlorian Fainelli { 19062e186a2cSTobias Waldekranz for (; dev; dev = dev->parent) { 19072e186a2cSTobias Waldekranz if (dev->of_node == data) 19082e186a2cSTobias Waldekranz return 1; 19092e186a2cSTobias Waldekranz } 1910aa836df9SFlorian Fainelli 19112e186a2cSTobias Waldekranz return 0; 1912aa836df9SFlorian Fainelli } 1913aa836df9SFlorian Fainelli 19149861f720SRussell King /* 19159861f720SRussell King * of_find_net_device_by_node - lookup the net device for the device node 19169861f720SRussell King * @np: OF device node 19179861f720SRussell King * 19189861f720SRussell King * Looks up the net_device structure corresponding with the device node. 19199861f720SRussell King * If successful, returns a pointer to the net_device with the embedded 19209861f720SRussell King * struct device refcount incremented by one, or NULL on failure. The 19219861f720SRussell King * refcount must be dropped when done with the net_device. 
19229861f720SRussell King */ 1923aa836df9SFlorian Fainelli struct net_device *of_find_net_device_by_node(struct device_node *np) 1924aa836df9SFlorian Fainelli { 1925aa836df9SFlorian Fainelli struct device *dev; 1926aa836df9SFlorian Fainelli 1927aa836df9SFlorian Fainelli dev = class_find_device(&net_class, NULL, np, of_dev_node_match); 1928aa836df9SFlorian Fainelli if (!dev) 1929aa836df9SFlorian Fainelli return NULL; 1930aa836df9SFlorian Fainelli 1931aa836df9SFlorian Fainelli return to_net_dev(dev); 1932aa836df9SFlorian Fainelli } 1933aa836df9SFlorian Fainelli EXPORT_SYMBOL(of_find_net_device_by_node); 1934aa836df9SFlorian Fainelli #endif 1935aa836df9SFlorian Fainelli 19369093bbb2SStephen Hemminger /* Delete sysfs entries but hold kobject reference until after all 19379093bbb2SStephen Hemminger * netdev references are gone. 19389093bbb2SStephen Hemminger */ 19396b53dafeSWANG Cong void netdev_unregister_kobject(struct net_device *ndev) 19401da177e4SLinus Torvalds { 19416648c65eSstephen hemminger struct device *dev = &ndev->dev; 19429093bbb2SStephen Hemminger 19438b8f3e66SChristian Brauner if (!refcount_read(&dev_net(ndev)->ns.count)) 1944002d8a1aSAndrey Vagin dev_set_uevent_suppress(dev, 1); 1945002d8a1aSAndrey Vagin 19469093bbb2SStephen Hemminger kobject_get(&dev->kobj); 19473891845eSEric W. Biederman 19486b53dafeSWANG Cong remove_queue_kobjects(ndev); 19490a9627f2STom Herbert 19509802c8e2SMing Lei pm_runtime_set_memalloc_noio(dev, false); 19519802c8e2SMing Lei 19529093bbb2SStephen Hemminger device_del(dev); 19531da177e4SLinus Torvalds } 19541da177e4SLinus Torvalds 19551da177e4SLinus Torvalds /* Create sysfs entries for network device. 
*/ 19566b53dafeSWANG Cong int netdev_register_kobject(struct net_device *ndev) 19571da177e4SLinus Torvalds { 19586648c65eSstephen hemminger struct device *dev = &ndev->dev; 19596b53dafeSWANG Cong const struct attribute_group **groups = ndev->sysfs_groups; 19600a9627f2STom Herbert int error = 0; 19611da177e4SLinus Torvalds 1962a1b3f594SEric W. Biederman device_initialize(dev); 196343cb76d9SGreg Kroah-Hartman dev->class = &net_class; 19646b53dafeSWANG Cong dev->platform_data = ndev; 196543cb76d9SGreg Kroah-Hartman dev->groups = groups; 19661da177e4SLinus Torvalds 19676b53dafeSWANG Cong dev_set_name(dev, "%s", ndev->name); 19681da177e4SLinus Torvalds 19698b41d188SEric W. Biederman #ifdef CONFIG_SYSFS 19700c509a6cSEric W. Biederman /* Allow for a device specific group */ 19710c509a6cSEric W. Biederman if (*groups) 19720c509a6cSEric W. Biederman groups++; 19731da177e4SLinus Torvalds 19740c509a6cSEric W. Biederman *groups++ = &netstat_group; 197538c1a01cSJohannes Berg 197638c1a01cSJohannes Berg #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) 19776b53dafeSWANG Cong if (ndev->ieee80211_ptr) 197838c1a01cSJohannes Berg *groups++ = &wireless_group; 197938c1a01cSJohannes Berg #if IS_ENABLED(CONFIG_WIRELESS_EXT) 19806b53dafeSWANG Cong else if (ndev->wireless_handlers) 198138c1a01cSJohannes Berg *groups++ = &wireless_group; 198238c1a01cSJohannes Berg #endif 198338c1a01cSJohannes Berg #endif 19848b41d188SEric W. 
Biederman #endif /* CONFIG_SYSFS */ 19851da177e4SLinus Torvalds 19860a9627f2STom Herbert error = device_add(dev); 19870a9627f2STom Herbert if (error) 19888ed633b9SWang Hai return error; 19890a9627f2STom Herbert 19906b53dafeSWANG Cong error = register_queue_kobjects(ndev); 19918ed633b9SWang Hai if (error) { 19928ed633b9SWang Hai device_del(dev); 19938ed633b9SWang Hai return error; 19948ed633b9SWang Hai } 19950a9627f2STom Herbert 19969802c8e2SMing Lei pm_runtime_set_memalloc_noio(dev, true); 19979802c8e2SMing Lei 19980a9627f2STom Herbert return error; 19991da177e4SLinus Torvalds } 20001da177e4SLinus Torvalds 2001e6dee9f3SChristian Brauner /* Change owner for sysfs entries when moving network devices across network 2002e6dee9f3SChristian Brauner * namespaces owned by different user namespaces. 2003e6dee9f3SChristian Brauner */ 2004e6dee9f3SChristian Brauner int netdev_change_owner(struct net_device *ndev, const struct net *net_old, 2005e6dee9f3SChristian Brauner const struct net *net_new) 2006e6dee9f3SChristian Brauner { 2007e6dee9f3SChristian Brauner struct device *dev = &ndev->dev; 2008e6dee9f3SChristian Brauner kuid_t old_uid, new_uid; 2009e6dee9f3SChristian Brauner kgid_t old_gid, new_gid; 2010e6dee9f3SChristian Brauner int error; 2011e6dee9f3SChristian Brauner 2012e6dee9f3SChristian Brauner net_ns_get_ownership(net_old, &old_uid, &old_gid); 2013e6dee9f3SChristian Brauner net_ns_get_ownership(net_new, &new_uid, &new_gid); 2014e6dee9f3SChristian Brauner 2015e6dee9f3SChristian Brauner /* The network namespace was changed but the owning user namespace is 2016e6dee9f3SChristian Brauner * identical so there's no need to change the owner of sysfs entries. 
2017e6dee9f3SChristian Brauner */ 2018e6dee9f3SChristian Brauner if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid)) 2019e6dee9f3SChristian Brauner return 0; 2020e6dee9f3SChristian Brauner 2021e6dee9f3SChristian Brauner error = device_change_owner(dev, new_uid, new_gid); 2022e6dee9f3SChristian Brauner if (error) 2023e6dee9f3SChristian Brauner return error; 2024e6dee9f3SChristian Brauner 2025d755407dSChristian Brauner error = queue_change_owner(ndev, new_uid, new_gid); 2026d755407dSChristian Brauner if (error) 2027d755407dSChristian Brauner return error; 2028d755407dSChristian Brauner 2029e6dee9f3SChristian Brauner return 0; 2030e6dee9f3SChristian Brauner } 2031e6dee9f3SChristian Brauner 2032b793dc5cSstephen hemminger int netdev_class_create_file_ns(const struct class_attribute *class_attr, 203358292cbeSTejun Heo const void *ns) 2034b8a9787eSJay Vosburgh { 203558292cbeSTejun Heo return class_create_file_ns(&net_class, class_attr, ns); 2036b8a9787eSJay Vosburgh } 203758292cbeSTejun Heo EXPORT_SYMBOL(netdev_class_create_file_ns); 2038b8a9787eSJay Vosburgh 2039b793dc5cSstephen hemminger void netdev_class_remove_file_ns(const struct class_attribute *class_attr, 204058292cbeSTejun Heo const void *ns) 2041b8a9787eSJay Vosburgh { 204258292cbeSTejun Heo class_remove_file_ns(&net_class, class_attr, ns); 2043b8a9787eSJay Vosburgh } 204458292cbeSTejun Heo EXPORT_SYMBOL(netdev_class_remove_file_ns); 2045b8a9787eSJay Vosburgh 2046a48d4bb0SDaniel Borkmann int __init netdev_kobject_init(void) 20471da177e4SLinus Torvalds { 2048608b4b95SEric W. Biederman kobj_ns_type_register(&net_ns_type_operations); 20491da177e4SLinus Torvalds return class_register(&net_class); 20501da177e4SLinus Torvalds } 2051