// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "dev.h"
#include "net-sysfs.h"

static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
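
/*
 * Illustrative sketch (editor's addition, not kernel code): how the locking
 * rules above look from a caller's point of view.  The net, dev and
 * use_dev() names are hypothetical placeholders.
 *
 *	// pure reader: RCU is enough, no reference counts are taken
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		use_dev(dev);
 *	rcu_read_unlock();
 *
 *	// writer: takes the rtnl semaphore; the list/hash updates themselves
 *	// additionally take dev_base_lock for writing (see list_netdevice())
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();
 */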

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock_irqsave(struct softnet_data *sd,
				    unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
					  unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev, bool lock)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	if (lock)
		write_lock(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	if (lock)
		write_unlock(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

"_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 464 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 465 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 466 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 467 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 468 469 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 470 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 471 472 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 473 { 474 int i; 475 476 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 477 if (netdev_lock_type[i] == dev_type) 478 return i; 479 /* the last key is used by default */ 480 return ARRAY_SIZE(netdev_lock_type) - 1; 481 } 482 483 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 484 unsigned short dev_type) 485 { 486 int i; 487 488 i = netdev_lock_pos(dev_type); 489 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 490 netdev_lock_name[i]); 491 } 492 493 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 494 { 495 int i; 496 497 i = netdev_lock_pos(dev->type); 498 lockdep_set_class_and_name(&dev->addr_list_lock, 499 &netdev_addr_lock_key[i], 500 netdev_lock_name[i]); 501 } 502 #else 503 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 504 unsigned short dev_type) 505 { 506 } 507 508 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 509 { 510 } 511 #endif 512 513 /******************************************************************************* 514 * 515 * Protocol management and registration routines 516 * 517 *******************************************************************************/ 518 519 520 /* 521 * Add a protocol ID to the list. Now that the input handler is 522 * smarter we can dispense with all the messy stuff that used to be 523 * here. 524 * 525 * BEWARE!!! Protocol handlers, mangling input packets, 526 * MUST BE last in hash buckets and checking protocol handlers 527 * MUST start from promiscuous ptype_all chain in net_bh. 528 * It is true now, do not change it. 529 * Explanation follows: if protocol handler, mangling packet, will 530 * be the first on list, it is not able to sense, that packet 531 * is cloned and should be copied-on-write, so that it will 532 * change it and subsequent readers will get broken packet. 533 * --ANK (980803) 534 */ 535 536 static inline struct list_head *ptype_head(const struct packet_type *pt) 537 { 538 if (pt->type == htons(ETH_P_ALL)) 539 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 540 else 541 return pt->dev ? &pt->dev->ptype_specific : 542 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 543 } 544 545 /** 546 * dev_add_pack - add packet handler 547 * @pt: packet type declaration 548 * 549 * Add a protocol handler to the networking stack. The passed &packet_type 550 * is linked into kernel lists and may not be freed until it has been 551 * removed from the kernel lists. 552 * 553 * This call does not sleep therefore it can not 554 * guarantee all CPU's that are in middle of receiving packets 555 * will see the new packet type (until the next received packet). 

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
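
/*
 * Illustrative sketch (editor's addition, not kernel code): a hypothetical
 * protocol module pairing dev_add_pack() with dev_remove_pack().  The
 * my_proto_* names are placeholders.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		// inspect the packet, then release our reference to it
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_proto_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	// tap every protocol
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_proto_pt);		// start receiving
 *	...
 *	dev_remove_pack(&my_proto_pt);		// sleeps until no CPU uses it
 */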

/*******************************************************************************
 *
 *			Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
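
/*
 * Illustrative sketch (editor's addition, not kernel code): the reference
 * counting contract of dev_get_by_name().  The net and do_something() names
 * are hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (!dev)
 *		return -ENODEV;
 *	do_something(dev);
 *	dev_put(dev);		// drop the reference taken by the lookup
 *
 * Under rcu_read_lock(), dev_get_by_name_rcu() can be used instead and no
 * reference needs to be dropped.
 */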

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;

			list_for_each_entry(name_node, &d->name_node->list, list) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/* avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					__set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!netdev_name_in_use(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strscpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
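
/*
 * Illustrative sketch (editor's addition, not kernel code): a driver that
 * wants automatic numbering can hand a format string to dev_alloc_name()
 * before registration; the first free unit is picked.
 *
 *	err = dev_alloc_name(dev, "dummy%d");	// e.g. becomes "dummy0"
 *	if (err < 0)
 *		goto out_free;
 *	err = register_netdevice(dev);		// rtnl lock must be held
 */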

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (netdev_name_in_use(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strscpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock(&dev_base_lock);

	synchronize_rcu();

	write_lock(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
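
/*
 * Illustrative sketch (editor's addition, not kernel code): setting an
 * interface alias and reading it back.  The dev, alias and buf names are
 * hypothetical.
 *
 *	static const char alias[] = "uplink to core switch";
 *	char buf[IFALIASZ];
 *
 *	dev_set_alias(dev, alias, strlen(alias));
 *	if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
 *		pr_info("%s alias: %s\n", dev->name, buf);
 */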

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device.  Caller must make sure dev cannot go
 *	away,  e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
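
/*
 * Illustrative sketch (editor's addition, not kernel code): a driver that
 * has just completed a failover to a new port could announce the move so
 * that switches relearn the MAC address.  my_failover_done() is hypothetical.
 *
 *	static void my_failover_done(struct net_device *dev)
 *	{
 *		...
 *		netdev_notify_peers(dev);	// takes rtnl internally
 *	}
 *
 * A caller that already holds the rtnl lock should use
 * __netdev_notify_peers() instead.
 */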

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
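
/*
 * Illustrative sketch (editor's addition, not kernel code): bringing an
 * interface up and down from a context that does not yet hold rtnl.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL extack: no extended error info
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_close(dev);			// no-op if the device is already down
 *	rtnl_unlock();
 */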

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can even be on a different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	N(XDP_FEAT_CHANGE)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						     struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow it to have a race-free view
 * of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
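
/*
 * Illustrative sketch (editor's addition, not kernel code): a module that
 * wants to react to interfaces appearing and going away.  The my_* names
 * are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:
 *			pr_info("%s registered\n", dev->name);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			pr_info("%s going away\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *
 *	register_netdevice_notifier(&my_nb);	// replays REGISTER/UP events
 *	...
 *	unregister_netdevice_notifier(&my_nb);	// synthesizes DOWN/UNREGISTER
 */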

static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow it to have a race-free view
 * of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);

static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
{
	__unregister_netdevice_notifier_net(src_net, nb);
	__register_netdevice_notifier_net(dst_net, nb, true);
}

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list)
		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks.  Parameters and return value
 * are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
1933 */ 1934 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1935 if (ret & NOTIFY_STOP_MASK) 1936 return ret; 1937 return raw_notifier_call_chain(&netdev_chain, val, info); 1938 } 1939 1940 /** 1941 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks 1942 * for and rollback on error 1943 * @val_up: value passed unmodified to notifier function 1944 * @val_down: value passed unmodified to the notifier function when 1945 * recovering from an error on @val_up 1946 * @info: notifier information data 1947 * 1948 * Call all per-netns network notifier blocks, but not notifier blocks on 1949 * the global notifier chain. Parameters and return value are as for 1950 * raw_notifier_call_chain_robust(). 1951 */ 1952 1953 static int 1954 call_netdevice_notifiers_info_robust(unsigned long val_up, 1955 unsigned long val_down, 1956 struct netdev_notifier_info *info) 1957 { 1958 struct net *net = dev_net(info->dev); 1959 1960 ASSERT_RTNL(); 1961 1962 return raw_notifier_call_chain_robust(&net->netdev_chain, 1963 val_up, val_down, info); 1964 } 1965 1966 static int call_netdevice_notifiers_extack(unsigned long val, 1967 struct net_device *dev, 1968 struct netlink_ext_ack *extack) 1969 { 1970 struct netdev_notifier_info info = { 1971 .dev = dev, 1972 .extack = extack, 1973 }; 1974 1975 return call_netdevice_notifiers_info(val, &info); 1976 } 1977 1978 /** 1979 * call_netdevice_notifiers - call all network notifier blocks 1980 * @val: value passed unmodified to notifier function 1981 * @dev: net_device pointer passed unmodified to notifier function 1982 * 1983 * Call all network notifier blocks. Parameters and return value 1984 * are as for raw_notifier_call_chain(). 1985 */ 1986 1987 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1988 { 1989 return call_netdevice_notifiers_extack(val, dev, NULL); 1990 } 1991 EXPORT_SYMBOL(call_netdevice_notifiers); 1992 1993 /** 1994 * call_netdevice_notifiers_mtu - call all network notifier blocks 1995 * @val: value passed unmodified to notifier function 1996 * @dev: net_device pointer passed unmodified to notifier function 1997 * @arg: additional u32 argument passed to the notifier function 1998 * 1999 * Call all network notifier blocks. Parameters and return value 2000 * are as for raw_notifier_call_chain(). 
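 *
 * Because struct netdev_notifier_info_ext embeds the generic info as its
 * first member (enforced by the BUILD_BUG_ON below), a listener that
 * knows an event was raised through this helper can recover the extra
 * argument directly from the callback pointer. Hedged sketch
 * (hypothetical listener, illustrative names only):
 *
 *	static int my_mtu_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct netdev_notifier_info_ext *ext = ptr;
 *
 *		if (event == NETDEV_PRECHANGEMTU)
 *			pr_info("%s: MTU changing to %u\n",
 *				netdev_notifier_info_to_dev(ptr)->name,
 *				ext->ext.mtu);
 *		return NOTIFY_DONE;
 *	}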
2001 */ 2002 static int call_netdevice_notifiers_mtu(unsigned long val, 2003 struct net_device *dev, u32 arg) 2004 { 2005 struct netdev_notifier_info_ext info = { 2006 .info.dev = dev, 2007 .ext.mtu = arg, 2008 }; 2009 2010 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2011 2012 return call_netdevice_notifiers_info(val, &info.info); 2013 } 2014 2015 #ifdef CONFIG_NET_INGRESS 2016 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2017 2018 void net_inc_ingress_queue(void) 2019 { 2020 static_branch_inc(&ingress_needed_key); 2021 } 2022 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2023 2024 void net_dec_ingress_queue(void) 2025 { 2026 static_branch_dec(&ingress_needed_key); 2027 } 2028 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2029 #endif 2030 2031 #ifdef CONFIG_NET_EGRESS 2032 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2033 2034 void net_inc_egress_queue(void) 2035 { 2036 static_branch_inc(&egress_needed_key); 2037 } 2038 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2039 2040 void net_dec_egress_queue(void) 2041 { 2042 static_branch_dec(&egress_needed_key); 2043 } 2044 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2045 #endif 2046 2047 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2048 EXPORT_SYMBOL(netstamp_needed_key); 2049 #ifdef CONFIG_JUMP_LABEL 2050 static atomic_t netstamp_needed_deferred; 2051 static atomic_t netstamp_wanted; 2052 static void netstamp_clear(struct work_struct *work) 2053 { 2054 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2055 int wanted; 2056 2057 wanted = atomic_add_return(deferred, &netstamp_wanted); 2058 if (wanted > 0) 2059 static_branch_enable(&netstamp_needed_key); 2060 else 2061 static_branch_disable(&netstamp_needed_key); 2062 } 2063 static DECLARE_WORK(netstamp_work, netstamp_clear); 2064 #endif 2065 2066 void net_enable_timestamp(void) 2067 { 2068 #ifdef CONFIG_JUMP_LABEL 2069 int wanted = atomic_read(&netstamp_wanted); 2070 2071 while (wanted > 0) { 2072 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1)) 2073 return; 2074 } 2075 atomic_inc(&netstamp_needed_deferred); 2076 schedule_work(&netstamp_work); 2077 #else 2078 static_branch_inc(&netstamp_needed_key); 2079 #endif 2080 } 2081 EXPORT_SYMBOL(net_enable_timestamp); 2082 2083 void net_disable_timestamp(void) 2084 { 2085 #ifdef CONFIG_JUMP_LABEL 2086 int wanted = atomic_read(&netstamp_wanted); 2087 2088 while (wanted > 1) { 2089 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1)) 2090 return; 2091 } 2092 atomic_dec(&netstamp_needed_deferred); 2093 schedule_work(&netstamp_work); 2094 #else 2095 static_branch_dec(&netstamp_needed_key); 2096 #endif 2097 } 2098 EXPORT_SYMBOL(net_disable_timestamp); 2099 2100 static inline void net_timestamp_set(struct sk_buff *skb) 2101 { 2102 skb->tstamp = 0; 2103 skb->mono_delivery_time = 0; 2104 if (static_branch_unlikely(&netstamp_needed_key)) 2105 skb->tstamp = ktime_get_real(); 2106 } 2107 2108 #define net_timestamp_check(COND, SKB) \ 2109 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2110 if ((COND) && !(SKB)->tstamp) \ 2111 (SKB)->tstamp = ktime_get_real(); \ 2112 } \ 2113 2114 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2115 { 2116 return __is_skb_forwardable(dev, skb, true); 2117 } 2118 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2119 2120 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2121 bool check_mtu) 2122 { 2123 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2124 2125 if (likely(!ret)) { 2126 skb->protocol = eth_type_trans(skb, 
dev); 2127 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2128 } 2129 2130 return ret; 2131 } 2132 2133 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2134 { 2135 return __dev_forward_skb2(dev, skb, true); 2136 } 2137 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2138 2139 /** 2140 * dev_forward_skb - loopback an skb to another netif 2141 * 2142 * @dev: destination network device 2143 * @skb: buffer to forward 2144 * 2145 * return values: 2146 * NET_RX_SUCCESS (no congestion) 2147 * NET_RX_DROP (packet was dropped, but freed) 2148 * 2149 * dev_forward_skb can be used for injecting an skb from the 2150 * start_xmit function of one device into the receive queue 2151 * of another device. 2152 * 2153 * The receiving device may be in another namespace, so 2154 * we have to clear all information in the skb that could 2155 * impact namespace isolation. 2156 */ 2157 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2158 { 2159 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2160 } 2161 EXPORT_SYMBOL_GPL(dev_forward_skb); 2162 2163 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2164 { 2165 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2166 } 2167 2168 static inline int deliver_skb(struct sk_buff *skb, 2169 struct packet_type *pt_prev, 2170 struct net_device *orig_dev) 2171 { 2172 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2173 return -ENOMEM; 2174 refcount_inc(&skb->users); 2175 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2176 } 2177 2178 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2179 struct packet_type **pt, 2180 struct net_device *orig_dev, 2181 __be16 type, 2182 struct list_head *ptype_list) 2183 { 2184 struct packet_type *ptype, *pt_prev = *pt; 2185 2186 list_for_each_entry_rcu(ptype, ptype_list, list) { 2187 if (ptype->type != type) 2188 continue; 2189 if (pt_prev) 2190 deliver_skb(skb, pt_prev, orig_dev); 2191 pt_prev = ptype; 2192 } 2193 *pt = pt_prev; 2194 } 2195 2196 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2197 { 2198 if (!ptype->af_packet_priv || !skb->sk) 2199 return false; 2200 2201 if (ptype->id_match) 2202 return ptype->id_match(ptype, skb->sk); 2203 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2204 return true; 2205 2206 return false; 2207 } 2208 2209 /** 2210 * dev_nit_active - return true if any network interface taps are in use 2211 * 2212 * @dev: network device to check for the presence of taps 2213 */ 2214 bool dev_nit_active(struct net_device *dev) 2215 { 2216 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2217 } 2218 EXPORT_SYMBOL_GPL(dev_nit_active); 2219 2220 /* 2221 * Support routine. Sends outgoing frames to any network 2222 * taps currently in use. 
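 *
 * A "tap" here is a struct packet_type sitting on the global ptype_all
 * list or on the device's own ptype_all list - for instance what an
 * ETH_P_ALL AF_PACKET socket (tcpdump and friends) installs. Hedged
 * sketch of such a registration (my_tap and my_tap_rcv are hypothetical;
 * my_tap_rcv must have the usual packet_type ->func signature):
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *
 * Once such a tap exists, dev_nit_active() returns true and frames being
 * transmitted are cloned to it by the routine below.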
2223 */ 2224 2225 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2226 { 2227 struct packet_type *ptype; 2228 struct sk_buff *skb2 = NULL; 2229 struct packet_type *pt_prev = NULL; 2230 struct list_head *ptype_list = &ptype_all; 2231 2232 rcu_read_lock(); 2233 again: 2234 list_for_each_entry_rcu(ptype, ptype_list, list) { 2235 if (ptype->ignore_outgoing) 2236 continue; 2237 2238 /* Never send packets back to the socket 2239 * they originated from - MvS (miquels@drinkel.ow.org) 2240 */ 2241 if (skb_loop_sk(ptype, skb)) 2242 continue; 2243 2244 if (pt_prev) { 2245 deliver_skb(skb2, pt_prev, skb->dev); 2246 pt_prev = ptype; 2247 continue; 2248 } 2249 2250 /* need to clone skb, done only once */ 2251 skb2 = skb_clone(skb, GFP_ATOMIC); 2252 if (!skb2) 2253 goto out_unlock; 2254 2255 net_timestamp_set(skb2); 2256 2257 /* skb->nh should be correctly 2258 * set by sender, so that the second statement is 2259 * just protection against buggy protocols. 2260 */ 2261 skb_reset_mac_header(skb2); 2262 2263 if (skb_network_header(skb2) < skb2->data || 2264 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2265 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2266 ntohs(skb2->protocol), 2267 dev->name); 2268 skb_reset_network_header(skb2); 2269 } 2270 2271 skb2->transport_header = skb2->network_header; 2272 skb2->pkt_type = PACKET_OUTGOING; 2273 pt_prev = ptype; 2274 } 2275 2276 if (ptype_list == &ptype_all) { 2277 ptype_list = &dev->ptype_all; 2278 goto again; 2279 } 2280 out_unlock: 2281 if (pt_prev) { 2282 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2283 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2284 else 2285 kfree_skb(skb2); 2286 } 2287 rcu_read_unlock(); 2288 } 2289 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2290 2291 /** 2292 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2293 * @dev: Network device 2294 * @txq: number of queues available 2295 * 2296 * If real_num_tx_queues is changed the tc mappings may no longer be 2297 * valid. To resolve this verify the tc mapping remains valid and if 2298 * not NULL the mapping. With no priorities mapping to this 2299 * offset/count pair it will no longer be used. In the worst case TC0 2300 * is invalid nothing can be done so disable priority mappings. If is 2301 * expected that drivers will fix this mapping if they can before 2302 * calling netif_set_real_num_tx_queues. 2303 */ 2304 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2305 { 2306 int i; 2307 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2308 2309 /* If TC0 is invalidated disable TC mapping */ 2310 if (tc->offset + tc->count > txq) { 2311 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2312 dev->num_tc = 0; 2313 return; 2314 } 2315 2316 /* Invalidated prio to tc mappings set to TC0 */ 2317 for (i = 1; i < TC_BITMASK + 1; i++) { 2318 int q = netdev_get_prio_tc_map(dev, i); 2319 2320 tc = &dev->tc_to_txq[q]; 2321 if (tc->offset + tc->count > txq) { 2322 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2323 i, q); 2324 netdev_set_prio_tc_map(dev, i, 0); 2325 } 2326 } 2327 } 2328 2329 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2330 { 2331 if (dev->num_tc) { 2332 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2333 int i; 2334 2335 /* walk through the TCs and see if it falls into any of them */ 2336 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2337 if ((txq - tc->offset) < tc->count) 2338 return i; 2339 } 2340 2341 /* didn't find it, just return -1 to indicate no match */ 2342 return -1; 2343 } 2344 2345 return 0; 2346 } 2347 EXPORT_SYMBOL(netdev_txq_to_tc); 2348 2349 #ifdef CONFIG_XPS 2350 static struct static_key xps_needed __read_mostly; 2351 static struct static_key xps_rxqs_needed __read_mostly; 2352 static DEFINE_MUTEX(xps_map_mutex); 2353 #define xmap_dereference(P) \ 2354 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2355 2356 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2357 struct xps_dev_maps *old_maps, int tci, u16 index) 2358 { 2359 struct xps_map *map = NULL; 2360 int pos; 2361 2362 if (dev_maps) 2363 map = xmap_dereference(dev_maps->attr_map[tci]); 2364 if (!map) 2365 return false; 2366 2367 for (pos = map->len; pos--;) { 2368 if (map->queues[pos] != index) 2369 continue; 2370 2371 if (map->len > 1) { 2372 map->queues[pos] = map->queues[--map->len]; 2373 break; 2374 } 2375 2376 if (old_maps) 2377 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2378 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2379 kfree_rcu(map, rcu); 2380 return false; 2381 } 2382 2383 return true; 2384 } 2385 2386 static bool remove_xps_queue_cpu(struct net_device *dev, 2387 struct xps_dev_maps *dev_maps, 2388 int cpu, u16 offset, u16 count) 2389 { 2390 int num_tc = dev_maps->num_tc; 2391 bool active = false; 2392 int tci; 2393 2394 for (tci = cpu * num_tc; num_tc--; tci++) { 2395 int i, j; 2396 2397 for (i = count, j = offset; i--; j++) { 2398 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2399 break; 2400 } 2401 2402 active |= i < 0; 2403 } 2404 2405 return active; 2406 } 2407 2408 static void reset_xps_maps(struct net_device *dev, 2409 struct xps_dev_maps *dev_maps, 2410 enum xps_map_type type) 2411 { 2412 static_key_slow_dec_cpuslocked(&xps_needed); 2413 if (type == XPS_RXQS) 2414 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2415 2416 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2417 2418 kfree_rcu(dev_maps, rcu); 2419 } 2420 2421 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2422 u16 offset, u16 count) 2423 { 2424 struct xps_dev_maps *dev_maps; 2425 bool active = false; 2426 int i, j; 2427 2428 dev_maps = xmap_dereference(dev->xps_maps[type]); 2429 if (!dev_maps) 2430 return; 2431 2432 for (j = 0; j < dev_maps->nr_ids; j++) 2433 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2434 if (!active) 2435 reset_xps_maps(dev, dev_maps, type); 2436 2437 if (type == XPS_CPUS) { 2438 for (i = offset + (count - 1); count--; i--) 2439 netdev_queue_numa_node_write( 2440 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2441 } 2442 } 2443 2444 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2445 u16 count) 2446 { 2447 if (!static_key_false(&xps_needed)) 2448 return; 2449 2450 cpus_read_lock(); 2451 mutex_lock(&xps_map_mutex); 2452 2453 if (static_key_false(&xps_rxqs_needed)) 2454 clean_xps_maps(dev, XPS_RXQS, offset, count); 2455 2456 clean_xps_maps(dev, XPS_CPUS, offset, count); 2457 2458 mutex_unlock(&xps_map_mutex); 2459 cpus_read_unlock(); 2460 } 2461 2462 static void 
netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2463 { 2464 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2465 } 2466 2467 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2468 u16 index, bool is_rxqs_map) 2469 { 2470 struct xps_map *new_map; 2471 int alloc_len = XPS_MIN_MAP_ALLOC; 2472 int i, pos; 2473 2474 for (pos = 0; map && pos < map->len; pos++) { 2475 if (map->queues[pos] != index) 2476 continue; 2477 return map; 2478 } 2479 2480 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2481 if (map) { 2482 if (pos < map->alloc_len) 2483 return map; 2484 2485 alloc_len = map->alloc_len * 2; 2486 } 2487 2488 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2489 * map 2490 */ 2491 if (is_rxqs_map) 2492 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2493 else 2494 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2495 cpu_to_node(attr_index)); 2496 if (!new_map) 2497 return NULL; 2498 2499 for (i = 0; i < pos; i++) 2500 new_map->queues[i] = map->queues[i]; 2501 new_map->alloc_len = alloc_len; 2502 new_map->len = pos; 2503 2504 return new_map; 2505 } 2506 2507 /* Copy xps maps at a given index */ 2508 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2509 struct xps_dev_maps *new_dev_maps, int index, 2510 int tc, bool skip_tc) 2511 { 2512 int i, tci = index * dev_maps->num_tc; 2513 struct xps_map *map; 2514 2515 /* copy maps belonging to foreign traffic classes */ 2516 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2517 if (i == tc && skip_tc) 2518 continue; 2519 2520 /* fill in the new device map from the old device map */ 2521 map = xmap_dereference(dev_maps->attr_map[tci]); 2522 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2523 } 2524 } 2525 2526 /* Must be called under cpus_read_lock */ 2527 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2528 u16 index, enum xps_map_type type) 2529 { 2530 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2531 const unsigned long *online_mask = NULL; 2532 bool active = false, copy = false; 2533 int i, j, tci, numa_node_id = -2; 2534 int maps_sz, num_tc = 1, tc = 0; 2535 struct xps_map *map, *new_map; 2536 unsigned int nr_ids; 2537 2538 WARN_ON_ONCE(index >= dev->num_tx_queues); 2539 2540 if (dev->num_tc) { 2541 /* Do not allow XPS on subordinate device directly */ 2542 num_tc = dev->num_tc; 2543 if (num_tc < 0) 2544 return -EINVAL; 2545 2546 /* If queue belongs to subordinate dev use its map */ 2547 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2548 2549 tc = netdev_txq_to_tc(dev, index); 2550 if (tc < 0) 2551 return -EINVAL; 2552 } 2553 2554 mutex_lock(&xps_map_mutex); 2555 2556 dev_maps = xmap_dereference(dev->xps_maps[type]); 2557 if (type == XPS_RXQS) { 2558 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2559 nr_ids = dev->num_rx_queues; 2560 } else { 2561 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2562 if (num_possible_cpus() > 1) 2563 online_mask = cpumask_bits(cpu_online_mask); 2564 nr_ids = nr_cpu_ids; 2565 } 2566 2567 if (maps_sz < L1_CACHE_BYTES) 2568 maps_sz = L1_CACHE_BYTES; 2569 2570 /* The old dev_maps could be larger or smaller than the one we're 2571 * setting up now, as dev->num_tc or nr_ids could have been updated in 2572 * between. We could try to be smart, but let's be safe instead and only 2573 * copy foreign traffic classes if the two map sizes match. 
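	 * For example, if netdev_set_num_tc() raised dev->num_tc from 2 to 4
	 * after the old map was built, each id's stride in attr_map[] differs
	 * between the two maps, so old entries cannot be copied position for
	 * position and are simply left out of the new map.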
2574 */ 2575 if (dev_maps && 2576 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2577 copy = true; 2578 2579 /* allocate memory for queue storage */ 2580 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2581 j < nr_ids;) { 2582 if (!new_dev_maps) { 2583 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2584 if (!new_dev_maps) { 2585 mutex_unlock(&xps_map_mutex); 2586 return -ENOMEM; 2587 } 2588 2589 new_dev_maps->nr_ids = nr_ids; 2590 new_dev_maps->num_tc = num_tc; 2591 } 2592 2593 tci = j * num_tc + tc; 2594 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2595 2596 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2597 if (!map) 2598 goto error; 2599 2600 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2601 } 2602 2603 if (!new_dev_maps) 2604 goto out_no_new_maps; 2605 2606 if (!dev_maps) { 2607 /* Increment static keys at most once per type */ 2608 static_key_slow_inc_cpuslocked(&xps_needed); 2609 if (type == XPS_RXQS) 2610 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2611 } 2612 2613 for (j = 0; j < nr_ids; j++) { 2614 bool skip_tc = false; 2615 2616 tci = j * num_tc + tc; 2617 if (netif_attr_test_mask(j, mask, nr_ids) && 2618 netif_attr_test_online(j, online_mask, nr_ids)) { 2619 /* add tx-queue to CPU/rx-queue maps */ 2620 int pos = 0; 2621 2622 skip_tc = true; 2623 2624 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2625 while ((pos < map->len) && (map->queues[pos] != index)) 2626 pos++; 2627 2628 if (pos == map->len) 2629 map->queues[map->len++] = index; 2630 #ifdef CONFIG_NUMA 2631 if (type == XPS_CPUS) { 2632 if (numa_node_id == -2) 2633 numa_node_id = cpu_to_node(j); 2634 else if (numa_node_id != cpu_to_node(j)) 2635 numa_node_id = -1; 2636 } 2637 #endif 2638 } 2639 2640 if (copy) 2641 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2642 skip_tc); 2643 } 2644 2645 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2646 2647 /* Cleanup old maps */ 2648 if (!dev_maps) 2649 goto out_no_old_maps; 2650 2651 for (j = 0; j < dev_maps->nr_ids; j++) { 2652 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2653 map = xmap_dereference(dev_maps->attr_map[tci]); 2654 if (!map) 2655 continue; 2656 2657 if (copy) { 2658 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2659 if (map == new_map) 2660 continue; 2661 } 2662 2663 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2664 kfree_rcu(map, rcu); 2665 } 2666 } 2667 2668 old_dev_maps = dev_maps; 2669 2670 out_no_old_maps: 2671 dev_maps = new_dev_maps; 2672 active = true; 2673 2674 out_no_new_maps: 2675 if (type == XPS_CPUS) 2676 /* update Tx queue numa node */ 2677 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2678 (numa_node_id >= 0) ? 2679 numa_node_id : NUMA_NO_NODE); 2680 2681 if (!dev_maps) 2682 goto out_no_maps; 2683 2684 /* removes tx-queue from unused CPUs/rx-queues */ 2685 for (j = 0; j < dev_maps->nr_ids; j++) { 2686 tci = j * dev_maps->num_tc; 2687 2688 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2689 if (i == tc && 2690 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2691 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2692 continue; 2693 2694 active |= remove_xps_queue(dev_maps, 2695 copy ? 
old_dev_maps : NULL, 2696 tci, index); 2697 } 2698 } 2699 2700 if (old_dev_maps) 2701 kfree_rcu(old_dev_maps, rcu); 2702 2703 /* free map if not active */ 2704 if (!active) 2705 reset_xps_maps(dev, dev_maps, type); 2706 2707 out_no_maps: 2708 mutex_unlock(&xps_map_mutex); 2709 2710 return 0; 2711 error: 2712 /* remove any maps that we added */ 2713 for (j = 0; j < nr_ids; j++) { 2714 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2715 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2716 map = copy ? 2717 xmap_dereference(dev_maps->attr_map[tci]) : 2718 NULL; 2719 if (new_map && new_map != map) 2720 kfree(new_map); 2721 } 2722 } 2723 2724 mutex_unlock(&xps_map_mutex); 2725 2726 kfree(new_dev_maps); 2727 return -ENOMEM; 2728 } 2729 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2730 2731 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2732 u16 index) 2733 { 2734 int ret; 2735 2736 cpus_read_lock(); 2737 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2738 cpus_read_unlock(); 2739 2740 return ret; 2741 } 2742 EXPORT_SYMBOL(netif_set_xps_queue); 2743 2744 #endif 2745 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2746 { 2747 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2748 2749 /* Unbind any subordinate channels */ 2750 while (txq-- != &dev->_tx[0]) { 2751 if (txq->sb_dev) 2752 netdev_unbind_sb_channel(dev, txq->sb_dev); 2753 } 2754 } 2755 2756 void netdev_reset_tc(struct net_device *dev) 2757 { 2758 #ifdef CONFIG_XPS 2759 netif_reset_xps_queues_gt(dev, 0); 2760 #endif 2761 netdev_unbind_all_sb_channels(dev); 2762 2763 /* Reset TC configuration of device */ 2764 dev->num_tc = 0; 2765 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2766 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2767 } 2768 EXPORT_SYMBOL(netdev_reset_tc); 2769 2770 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2771 { 2772 if (tc >= dev->num_tc) 2773 return -EINVAL; 2774 2775 #ifdef CONFIG_XPS 2776 netif_reset_xps_queues(dev, offset, count); 2777 #endif 2778 dev->tc_to_txq[tc].count = count; 2779 dev->tc_to_txq[tc].offset = offset; 2780 return 0; 2781 } 2782 EXPORT_SYMBOL(netdev_set_tc_queue); 2783 2784 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2785 { 2786 if (num_tc > TC_MAX_QUEUE) 2787 return -EINVAL; 2788 2789 #ifdef CONFIG_XPS 2790 netif_reset_xps_queues_gt(dev, 0); 2791 #endif 2792 netdev_unbind_all_sb_channels(dev); 2793 2794 dev->num_tc = num_tc; 2795 return 0; 2796 } 2797 EXPORT_SYMBOL(netdev_set_num_tc); 2798 2799 void netdev_unbind_sb_channel(struct net_device *dev, 2800 struct net_device *sb_dev) 2801 { 2802 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2803 2804 #ifdef CONFIG_XPS 2805 netif_reset_xps_queues_gt(sb_dev, 0); 2806 #endif 2807 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2808 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2809 2810 while (txq-- != &dev->_tx[0]) { 2811 if (txq->sb_dev == sb_dev) 2812 txq->sb_dev = NULL; 2813 } 2814 } 2815 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2816 2817 int netdev_bind_sb_channel_queue(struct net_device *dev, 2818 struct net_device *sb_dev, 2819 u8 tc, u16 count, u16 offset) 2820 { 2821 /* Make certain the sb_dev and dev are already configured */ 2822 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2823 return -EINVAL; 2824 2825 /* We cannot hand out queues we don't have */ 2826 if ((offset + count) > dev->real_num_tx_queues) 2827 return -EINVAL; 2828 2829 /* Record the mapping */ 2830 
sb_dev->tc_to_txq[tc].count = count; 2831 sb_dev->tc_to_txq[tc].offset = offset; 2832 2833 /* Provide a way for Tx queue to find the tc_to_txq map or 2834 * XPS map for itself. 2835 */ 2836 while (count--) 2837 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2838 2839 return 0; 2840 } 2841 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2842 2843 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2844 { 2845 /* Do not use a multiqueue device to represent a subordinate channel */ 2846 if (netif_is_multiqueue(dev)) 2847 return -ENODEV; 2848 2849 /* We allow channels 1 - 32767 to be used for subordinate channels. 2850 * Channel 0 is meant to be "native" mode and used only to represent 2851 * the main root device. We allow writing 0 to reset the device back 2852 * to normal mode after being used as a subordinate channel. 2853 */ 2854 if (channel > S16_MAX) 2855 return -EINVAL; 2856 2857 dev->num_tc = -channel; 2858 2859 return 0; 2860 } 2861 EXPORT_SYMBOL(netdev_set_sb_channel); 2862 2863 /* 2864 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2865 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2866 */ 2867 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2868 { 2869 bool disabling; 2870 int rc; 2871 2872 disabling = txq < dev->real_num_tx_queues; 2873 2874 if (txq < 1 || txq > dev->num_tx_queues) 2875 return -EINVAL; 2876 2877 if (dev->reg_state == NETREG_REGISTERED || 2878 dev->reg_state == NETREG_UNREGISTERING) { 2879 ASSERT_RTNL(); 2880 2881 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2882 txq); 2883 if (rc) 2884 return rc; 2885 2886 if (dev->num_tc) 2887 netif_setup_tc(dev, txq); 2888 2889 dev_qdisc_change_real_num_tx(dev, txq); 2890 2891 dev->real_num_tx_queues = txq; 2892 2893 if (disabling) { 2894 synchronize_net(); 2895 qdisc_reset_all_tx_gt(dev, txq); 2896 #ifdef CONFIG_XPS 2897 netif_reset_xps_queues_gt(dev, txq); 2898 #endif 2899 } 2900 } else { 2901 dev->real_num_tx_queues = txq; 2902 } 2903 2904 return 0; 2905 } 2906 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2907 2908 #ifdef CONFIG_SYSFS 2909 /** 2910 * netif_set_real_num_rx_queues - set actual number of RX queues used 2911 * @dev: Network device 2912 * @rxq: Actual number of RX queues 2913 * 2914 * This must be called either with the rtnl_lock held or before 2915 * registration of the net device. Returns 0 on success, or a 2916 * negative error code. If called before registration, it always 2917 * succeeds. 2918 */ 2919 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2920 { 2921 int rc; 2922 2923 if (rxq < 1 || rxq > dev->num_rx_queues) 2924 return -EINVAL; 2925 2926 if (dev->reg_state == NETREG_REGISTERED) { 2927 ASSERT_RTNL(); 2928 2929 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2930 rxq); 2931 if (rc) 2932 return rc; 2933 } 2934 2935 dev->real_num_rx_queues = rxq; 2936 return 0; 2937 } 2938 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2939 #endif 2940 2941 /** 2942 * netif_set_real_num_queues - set actual number of RX and TX queues used 2943 * @dev: Network device 2944 * @txq: Actual number of TX queues 2945 * @rxq: Actual number of RX queues 2946 * 2947 * Set the real number of both TX and RX queues. 2948 * Does nothing if the number of queues is already correct. 
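 *
 * Hedged sketch of a typical (hypothetical) driver call site after a
 * channel-count change, with rtnl held for a registered device
 * (new_tx/new_rx are illustrative names):
 *
 *	err = netif_set_real_num_queues(netdev, new_tx, new_rx);
 *	if (err)
 *		goto err_restore_channels;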
2949 */ 2950 int netif_set_real_num_queues(struct net_device *dev, 2951 unsigned int txq, unsigned int rxq) 2952 { 2953 unsigned int old_rxq = dev->real_num_rx_queues; 2954 int err; 2955 2956 if (txq < 1 || txq > dev->num_tx_queues || 2957 rxq < 1 || rxq > dev->num_rx_queues) 2958 return -EINVAL; 2959 2960 /* Start from increases, so the error path only does decreases - 2961 * decreases can't fail. 2962 */ 2963 if (rxq > dev->real_num_rx_queues) { 2964 err = netif_set_real_num_rx_queues(dev, rxq); 2965 if (err) 2966 return err; 2967 } 2968 if (txq > dev->real_num_tx_queues) { 2969 err = netif_set_real_num_tx_queues(dev, txq); 2970 if (err) 2971 goto undo_rx; 2972 } 2973 if (rxq < dev->real_num_rx_queues) 2974 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 2975 if (txq < dev->real_num_tx_queues) 2976 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 2977 2978 return 0; 2979 undo_rx: 2980 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 2981 return err; 2982 } 2983 EXPORT_SYMBOL(netif_set_real_num_queues); 2984 2985 /** 2986 * netif_set_tso_max_size() - set the max size of TSO frames supported 2987 * @dev: netdev to update 2988 * @size: max skb->len of a TSO frame 2989 * 2990 * Set the limit on the size of TSO super-frames the device can handle. 2991 * Unless explicitly set the stack will assume the value of 2992 * %GSO_LEGACY_MAX_SIZE. 2993 */ 2994 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) 2995 { 2996 dev->tso_max_size = min(GSO_MAX_SIZE, size); 2997 if (size < READ_ONCE(dev->gso_max_size)) 2998 netif_set_gso_max_size(dev, size); 2999 if (size < READ_ONCE(dev->gso_ipv4_max_size)) 3000 netif_set_gso_ipv4_max_size(dev, size); 3001 } 3002 EXPORT_SYMBOL(netif_set_tso_max_size); 3003 3004 /** 3005 * netif_set_tso_max_segs() - set the max number of segs supported for TSO 3006 * @dev: netdev to update 3007 * @segs: max number of TCP segments 3008 * 3009 * Set the limit on the number of TCP segments the device can generate from 3010 * a single TSO super-frame. 3011 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. 3012 */ 3013 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) 3014 { 3015 dev->tso_max_segs = segs; 3016 if (segs < READ_ONCE(dev->gso_max_segs)) 3017 netif_set_gso_max_segs(dev, segs); 3018 } 3019 EXPORT_SYMBOL(netif_set_tso_max_segs); 3020 3021 /** 3022 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper 3023 * @to: netdev to update 3024 * @from: netdev from which to copy the limits 3025 */ 3026 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) 3027 { 3028 netif_set_tso_max_size(to, from->tso_max_size); 3029 netif_set_tso_max_segs(to, from->tso_max_segs); 3030 } 3031 EXPORT_SYMBOL(netif_inherit_tso_max); 3032 3033 /** 3034 * netif_get_num_default_rss_queues - default number of RSS queues 3035 * 3036 * Default value is the number of physical cores if there are only 1 or 2, or 3037 * divided by 2 if there are more. 3038 */ 3039 int netif_get_num_default_rss_queues(void) 3040 { 3041 cpumask_var_t cpus; 3042 int cpu, count = 0; 3043 3044 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) 3045 return 1; 3046 3047 cpumask_copy(cpus, cpu_online_mask); 3048 for_each_cpu(cpu, cpus) { 3049 ++count; 3050 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); 3051 } 3052 free_cpumask_var(cpus); 3053 3054 return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; 3055 } 3056 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3057 3058 static void __netif_reschedule(struct Qdisc *q) 3059 { 3060 struct softnet_data *sd; 3061 unsigned long flags; 3062 3063 local_irq_save(flags); 3064 sd = this_cpu_ptr(&softnet_data); 3065 q->next_sched = NULL; 3066 *sd->output_queue_tailp = q; 3067 sd->output_queue_tailp = &q->next_sched; 3068 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3069 local_irq_restore(flags); 3070 } 3071 3072 void __netif_schedule(struct Qdisc *q) 3073 { 3074 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3075 __netif_reschedule(q); 3076 } 3077 EXPORT_SYMBOL(__netif_schedule); 3078 3079 struct dev_kfree_skb_cb { 3080 enum skb_drop_reason reason; 3081 }; 3082 3083 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3084 { 3085 return (struct dev_kfree_skb_cb *)skb->cb; 3086 } 3087 3088 void netif_schedule_queue(struct netdev_queue *txq) 3089 { 3090 rcu_read_lock(); 3091 if (!netif_xmit_stopped(txq)) { 3092 struct Qdisc *q = rcu_dereference(txq->qdisc); 3093 3094 __netif_schedule(q); 3095 } 3096 rcu_read_unlock(); 3097 } 3098 EXPORT_SYMBOL(netif_schedule_queue); 3099 3100 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3101 { 3102 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3103 struct Qdisc *q; 3104 3105 rcu_read_lock(); 3106 q = rcu_dereference(dev_queue->qdisc); 3107 __netif_schedule(q); 3108 rcu_read_unlock(); 3109 } 3110 } 3111 EXPORT_SYMBOL(netif_tx_wake_queue); 3112 3113 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3114 { 3115 unsigned long flags; 3116 3117 if (unlikely(!skb)) 3118 return; 3119 3120 if (likely(refcount_read(&skb->users) == 1)) { 3121 smp_rmb(); 3122 refcount_set(&skb->users, 0); 3123 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3124 return; 3125 } 3126 get_kfree_skb_cb(skb)->reason = reason; 3127 local_irq_save(flags); 3128 skb->next = __this_cpu_read(softnet_data.completion_queue); 3129 __this_cpu_write(softnet_data.completion_queue, skb); 3130 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3131 local_irq_restore(flags); 3132 } 3133 EXPORT_SYMBOL(dev_kfree_skb_irq_reason); 3134 3135 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3136 { 3137 if (in_hardirq() || irqs_disabled()) 3138 dev_kfree_skb_irq_reason(skb, reason); 3139 else 3140 kfree_skb_reason(skb, reason); 3141 } 3142 EXPORT_SYMBOL(dev_kfree_skb_any_reason); 3143 3144 3145 /** 3146 * netif_device_detach - mark device as removed 3147 * @dev: network device 3148 * 3149 * Mark device as removed from system and therefore no longer available. 3150 */ 3151 void netif_device_detach(struct net_device *dev) 3152 { 3153 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3154 netif_running(dev)) { 3155 netif_tx_stop_all_queues(dev); 3156 } 3157 } 3158 EXPORT_SYMBOL(netif_device_detach); 3159 3160 /** 3161 * netif_device_attach - mark device as attached 3162 * @dev: network device 3163 * 3164 * Mark device as attached from system and restart if needed. 3165 */ 3166 void netif_device_attach(struct net_device *dev) 3167 { 3168 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3169 netif_running(dev)) { 3170 netif_tx_wake_all_queues(dev); 3171 __netdev_watchdog_up(dev); 3172 } 3173 } 3174 EXPORT_SYMBOL(netif_device_attach); 3175 3176 /* 3177 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3178 * to be used as a distribution range. 
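 * For example, with a traffic class owning queues 8-11 (qoffset 8,
 * qcount 4), reciprocal_scale() maps skb_get_hash() into 0-3 and the
 * offset puts the result back into 8-11; a recorded rx queue number is
 * folded into the same 8-11 window instead of being hashed.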
3179 */ 3180 static u16 skb_tx_hash(const struct net_device *dev, 3181 const struct net_device *sb_dev, 3182 struct sk_buff *skb) 3183 { 3184 u32 hash; 3185 u16 qoffset = 0; 3186 u16 qcount = dev->real_num_tx_queues; 3187 3188 if (dev->num_tc) { 3189 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3190 3191 qoffset = sb_dev->tc_to_txq[tc].offset; 3192 qcount = sb_dev->tc_to_txq[tc].count; 3193 if (unlikely(!qcount)) { 3194 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3195 sb_dev->name, qoffset, tc); 3196 qoffset = 0; 3197 qcount = dev->real_num_tx_queues; 3198 } 3199 } 3200 3201 if (skb_rx_queue_recorded(skb)) { 3202 hash = skb_get_rx_queue(skb); 3203 if (hash >= qoffset) 3204 hash -= qoffset; 3205 while (unlikely(hash >= qcount)) 3206 hash -= qcount; 3207 return hash + qoffset; 3208 } 3209 3210 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3211 } 3212 3213 static void skb_warn_bad_offload(const struct sk_buff *skb) 3214 { 3215 static const netdev_features_t null_features; 3216 struct net_device *dev = skb->dev; 3217 const char *name = ""; 3218 3219 if (!net_ratelimit()) 3220 return; 3221 3222 if (dev) { 3223 if (dev->dev.parent) 3224 name = dev_driver_string(dev->dev.parent); 3225 else 3226 name = netdev_name(dev); 3227 } 3228 skb_dump(KERN_WARNING, skb, false); 3229 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3230 name, dev ? &dev->features : &null_features, 3231 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3232 } 3233 3234 /* 3235 * Invalidate hardware checksum when packet is to be mangled, and 3236 * complete checksum manually on outgoing path. 3237 */ 3238 int skb_checksum_help(struct sk_buff *skb) 3239 { 3240 __wsum csum; 3241 int ret = 0, offset; 3242 3243 if (skb->ip_summed == CHECKSUM_COMPLETE) 3244 goto out_set_summed; 3245 3246 if (unlikely(skb_is_gso(skb))) { 3247 skb_warn_bad_offload(skb); 3248 return -EINVAL; 3249 } 3250 3251 /* Before computing a checksum, we should make sure no frag could 3252 * be modified by an external entity : checksum could be wrong. 3253 */ 3254 if (skb_has_shared_frag(skb)) { 3255 ret = __skb_linearize(skb); 3256 if (ret) 3257 goto out; 3258 } 3259 3260 offset = skb_checksum_start_offset(skb); 3261 ret = -EINVAL; 3262 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3263 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3264 goto out; 3265 } 3266 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3267 3268 offset += skb->csum_offset; 3269 if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) { 3270 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3271 goto out; 3272 } 3273 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3274 if (ret) 3275 goto out; 3276 3277 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3278 out_set_summed: 3279 skb->ip_summed = CHECKSUM_NONE; 3280 out: 3281 return ret; 3282 } 3283 EXPORT_SYMBOL(skb_checksum_help); 3284 3285 int skb_crc32c_csum_help(struct sk_buff *skb) 3286 { 3287 __le32 crc32c_csum; 3288 int ret = 0, offset, start; 3289 3290 if (skb->ip_summed != CHECKSUM_PARTIAL) 3291 goto out; 3292 3293 if (unlikely(skb_is_gso(skb))) 3294 goto out; 3295 3296 /* Before computing a checksum, we should make sure no frag could 3297 * be modified by an external entity : checksum could be wrong. 
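	 * (Same guard as in skb_checksum_help() above: the CRC must be taken
	 * over data that cannot change underneath us, so skbs with shared
	 * frags are copied out via __skb_linearize() first.)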
3298 */ 3299 if (unlikely(skb_has_shared_frag(skb))) { 3300 ret = __skb_linearize(skb); 3301 if (ret) 3302 goto out; 3303 } 3304 start = skb_checksum_start_offset(skb); 3305 offset = start + offsetof(struct sctphdr, checksum); 3306 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3307 ret = -EINVAL; 3308 goto out; 3309 } 3310 3311 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3312 if (ret) 3313 goto out; 3314 3315 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3316 skb->len - start, ~(__u32)0, 3317 crc32c_csum_stub)); 3318 *(__le32 *)(skb->data + offset) = crc32c_csum; 3319 skb->ip_summed = CHECKSUM_NONE; 3320 skb->csum_not_inet = 0; 3321 out: 3322 return ret; 3323 } 3324 3325 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3326 { 3327 __be16 type = skb->protocol; 3328 3329 /* Tunnel gso handlers can set protocol to ethernet. */ 3330 if (type == htons(ETH_P_TEB)) { 3331 struct ethhdr *eth; 3332 3333 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3334 return 0; 3335 3336 eth = (struct ethhdr *)skb->data; 3337 type = eth->h_proto; 3338 } 3339 3340 return __vlan_get_protocol(skb, type, depth); 3341 } 3342 3343 /* openvswitch calls this on rx path, so we need a different check. 3344 */ 3345 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3346 { 3347 if (tx_path) 3348 return skb->ip_summed != CHECKSUM_PARTIAL && 3349 skb->ip_summed != CHECKSUM_UNNECESSARY; 3350 3351 return skb->ip_summed == CHECKSUM_NONE; 3352 } 3353 3354 /** 3355 * __skb_gso_segment - Perform segmentation on skb. 3356 * @skb: buffer to segment 3357 * @features: features for the output path (see dev->features) 3358 * @tx_path: whether it is called in TX path 3359 * 3360 * This function segments the given skb and returns a list of segments. 3361 * 3362 * It may return NULL if the skb requires no segmentation. This is 3363 * only possible when GSO is used for verifying header integrity. 3364 * 3365 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. 3366 */ 3367 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3368 netdev_features_t features, bool tx_path) 3369 { 3370 struct sk_buff *segs; 3371 3372 if (unlikely(skb_needs_check(skb, tx_path))) { 3373 int err; 3374 3375 /* We're going to init ->check field in TCP or UDP header */ 3376 err = skb_cow_head(skb, 0); 3377 if (err < 0) 3378 return ERR_PTR(err); 3379 } 3380 3381 /* Only report GSO partial support if it will enable us to 3382 * support segmentation on this frame without needing additional 3383 * work. 3384 */ 3385 if (features & NETIF_F_GSO_PARTIAL) { 3386 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3387 struct net_device *dev = skb->dev; 3388 3389 partial_features |= dev->features & dev->gso_partial_features; 3390 if (!skb_gso_ok(skb, features | partial_features)) 3391 features &= ~NETIF_F_GSO_PARTIAL; 3392 } 3393 3394 BUILD_BUG_ON(SKB_GSO_CB_OFFSET + 3395 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3396 3397 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3398 SKB_GSO_CB(skb)->encap_level = 0; 3399 3400 skb_reset_mac_header(skb); 3401 skb_reset_mac_len(skb); 3402 3403 segs = skb_mac_gso_segment(skb, features); 3404 3405 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3406 skb_warn_bad_offload(skb); 3407 3408 return segs; 3409 } 3410 EXPORT_SYMBOL(__skb_gso_segment); 3411 3412 /* Take action when hardware reception checksum errors are detected. 
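 * netdev_rx_csum_fault() is typically reached from checksum-verification
 * helpers such as __skb_checksum_complete() when a checksum the device
 * vouched for fails software verification; it dumps the offending skb
 * once (via DO_ONCE_LITE below) to help pinpoint the buggy driver or
 * hardware.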
*/ 3413 #ifdef CONFIG_BUG 3414 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3415 { 3416 netdev_err(dev, "hw csum failure\n"); 3417 skb_dump(KERN_ERR, skb, true); 3418 dump_stack(); 3419 } 3420 3421 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3422 { 3423 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3424 } 3425 EXPORT_SYMBOL(netdev_rx_csum_fault); 3426 #endif 3427 3428 /* XXX: check that highmem exists at all on the given machine. */ 3429 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3430 { 3431 #ifdef CONFIG_HIGHMEM 3432 int i; 3433 3434 if (!(dev->features & NETIF_F_HIGHDMA)) { 3435 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3436 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3437 3438 if (PageHighMem(skb_frag_page(frag))) 3439 return 1; 3440 } 3441 } 3442 #endif 3443 return 0; 3444 } 3445 3446 /* If MPLS offload request, verify we are testing hardware MPLS features 3447 * instead of standard features for the netdev. 3448 */ 3449 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3450 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3451 netdev_features_t features, 3452 __be16 type) 3453 { 3454 if (eth_p_mpls(type)) 3455 features &= skb->dev->mpls_features; 3456 3457 return features; 3458 } 3459 #else 3460 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3461 netdev_features_t features, 3462 __be16 type) 3463 { 3464 return features; 3465 } 3466 #endif 3467 3468 static netdev_features_t harmonize_features(struct sk_buff *skb, 3469 netdev_features_t features) 3470 { 3471 __be16 type; 3472 3473 type = skb_network_protocol(skb, NULL); 3474 features = net_mpls_features(skb, features, type); 3475 3476 if (skb->ip_summed != CHECKSUM_NONE && 3477 !can_checksum_protocol(features, type)) { 3478 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3479 } 3480 if (illegal_highdma(skb->dev, skb)) 3481 features &= ~NETIF_F_SG; 3482 3483 return features; 3484 } 3485 3486 netdev_features_t passthru_features_check(struct sk_buff *skb, 3487 struct net_device *dev, 3488 netdev_features_t features) 3489 { 3490 return features; 3491 } 3492 EXPORT_SYMBOL(passthru_features_check); 3493 3494 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3495 struct net_device *dev, 3496 netdev_features_t features) 3497 { 3498 return vlan_features_check(skb, features); 3499 } 3500 3501 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3502 struct net_device *dev, 3503 netdev_features_t features) 3504 { 3505 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3506 3507 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3508 return features & ~NETIF_F_GSO_MASK; 3509 3510 if (!skb_shinfo(skb)->gso_type) { 3511 skb_warn_bad_offload(skb); 3512 return features & ~NETIF_F_GSO_MASK; 3513 } 3514 3515 /* Support for GSO partial features requires software 3516 * intervention before we can actually process the packets 3517 * so we need to strip support for any partial features now 3518 * and we can pull them back in after we have partially 3519 * segmented the frame. 3520 */ 3521 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3522 features &= ~dev->gso_partial_features; 3523 3524 /* Make sure to clear the IPv4 ID mangling feature if the 3525 * IPv4 header has the potential to be fragmented. 3526 */ 3527 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3528 struct iphdr *iph = skb->encapsulation ? 
3529 inner_ip_hdr(skb) : ip_hdr(skb); 3530 3531 if (!(iph->frag_off & htons(IP_DF))) 3532 features &= ~NETIF_F_TSO_MANGLEID; 3533 } 3534 3535 return features; 3536 } 3537 3538 netdev_features_t netif_skb_features(struct sk_buff *skb) 3539 { 3540 struct net_device *dev = skb->dev; 3541 netdev_features_t features = dev->features; 3542 3543 if (skb_is_gso(skb)) 3544 features = gso_features_check(skb, dev, features); 3545 3546 /* If encapsulation offload request, verify we are testing 3547 * hardware encapsulation features instead of standard 3548 * features for the netdev 3549 */ 3550 if (skb->encapsulation) 3551 features &= dev->hw_enc_features; 3552 3553 if (skb_vlan_tagged(skb)) 3554 features = netdev_intersect_features(features, 3555 dev->vlan_features | 3556 NETIF_F_HW_VLAN_CTAG_TX | 3557 NETIF_F_HW_VLAN_STAG_TX); 3558 3559 if (dev->netdev_ops->ndo_features_check) 3560 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3561 features); 3562 else 3563 features &= dflt_features_check(skb, dev, features); 3564 3565 return harmonize_features(skb, features); 3566 } 3567 EXPORT_SYMBOL(netif_skb_features); 3568 3569 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3570 struct netdev_queue *txq, bool more) 3571 { 3572 unsigned int len; 3573 int rc; 3574 3575 if (dev_nit_active(dev)) 3576 dev_queue_xmit_nit(skb, dev); 3577 3578 len = skb->len; 3579 trace_net_dev_start_xmit(skb, dev); 3580 rc = netdev_start_xmit(skb, dev, txq, more); 3581 trace_net_dev_xmit(skb, rc, dev, len); 3582 3583 return rc; 3584 } 3585 3586 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3587 struct netdev_queue *txq, int *ret) 3588 { 3589 struct sk_buff *skb = first; 3590 int rc = NETDEV_TX_OK; 3591 3592 while (skb) { 3593 struct sk_buff *next = skb->next; 3594 3595 skb_mark_not_on_list(skb); 3596 rc = xmit_one(skb, dev, txq, next != NULL); 3597 if (unlikely(!dev_xmit_complete(rc))) { 3598 skb->next = next; 3599 goto out; 3600 } 3601 3602 skb = next; 3603 if (netif_tx_queue_stopped(txq) && skb) { 3604 rc = NETDEV_TX_BUSY; 3605 break; 3606 } 3607 } 3608 3609 out: 3610 *ret = rc; 3611 return skb; 3612 } 3613 3614 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3615 netdev_features_t features) 3616 { 3617 if (skb_vlan_tag_present(skb) && 3618 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3619 skb = __vlan_hwaccel_push_inside(skb); 3620 return skb; 3621 } 3622 3623 int skb_csum_hwoffload_help(struct sk_buff *skb, 3624 const netdev_features_t features) 3625 { 3626 if (unlikely(skb_csum_is_sctp(skb))) 3627 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3628 skb_crc32c_csum_help(skb); 3629 3630 if (features & NETIF_F_HW_CSUM) 3631 return 0; 3632 3633 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3634 switch (skb->csum_offset) { 3635 case offsetof(struct tcphdr, check): 3636 case offsetof(struct udphdr, check): 3637 return 0; 3638 } 3639 } 3640 3641 return skb_checksum_help(skb); 3642 } 3643 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3644 3645 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3646 { 3647 netdev_features_t features; 3648 3649 features = netif_skb_features(skb); 3650 skb = validate_xmit_vlan(skb, features); 3651 if (unlikely(!skb)) 3652 goto out_null; 3653 3654 skb = sk_validate_xmit_skb(skb, dev); 3655 if (unlikely(!skb)) 3656 goto out_null; 3657 3658 if (netif_needs_gso(skb, features)) { 3659 struct sk_buff *segs; 3660 3661 segs = skb_gso_segment(skb, features); 3662 if (IS_ERR(segs)) { 3663 goto out_kfree_skb; 3664 } else if (segs) { 3665 consume_skb(skb); 3666 skb = segs; 3667 } 3668 } else { 3669 if (skb_needs_linearize(skb, features) && 3670 __skb_linearize(skb)) 3671 goto out_kfree_skb; 3672 3673 /* If packet is not checksummed and device does not 3674 * support checksumming for this protocol, complete 3675 * checksumming here. 3676 */ 3677 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3678 if (skb->encapsulation) 3679 skb_set_inner_transport_header(skb, 3680 skb_checksum_start_offset(skb)); 3681 else 3682 skb_set_transport_header(skb, 3683 skb_checksum_start_offset(skb)); 3684 if (skb_csum_hwoffload_help(skb, features)) 3685 goto out_kfree_skb; 3686 } 3687 } 3688 3689 skb = validate_xmit_xfrm(skb, features, again); 3690 3691 return skb; 3692 3693 out_kfree_skb: 3694 kfree_skb(skb); 3695 out_null: 3696 dev_core_stats_tx_dropped_inc(dev); 3697 return NULL; 3698 } 3699 3700 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3701 { 3702 struct sk_buff *next, *head = NULL, *tail; 3703 3704 for (; skb != NULL; skb = next) { 3705 next = skb->next; 3706 skb_mark_not_on_list(skb); 3707 3708 /* in case skb wont be segmented, point to itself */ 3709 skb->prev = skb; 3710 3711 skb = validate_xmit_skb(skb, dev, again); 3712 if (!skb) 3713 continue; 3714 3715 if (!head) 3716 head = skb; 3717 else 3718 tail->next = skb; 3719 /* If skb was segmented, skb->prev points to 3720 * the last segment. If not, it still contains skb. 
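		 * Concretely: a GSO skb that expanded to segments A->B->C comes
		 * back with A->prev == C, so tail jumps straight to the last
		 * segment, while an unsegmented skb still has skb->prev == skb
		 * from the assignment at the top of this loop.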
3721 */ 3722 tail = skb->prev; 3723 } 3724 return head; 3725 } 3726 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3727 3728 static void qdisc_pkt_len_init(struct sk_buff *skb) 3729 { 3730 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3731 3732 qdisc_skb_cb(skb)->pkt_len = skb->len; 3733 3734 /* To get more precise estimation of bytes sent on wire, 3735 * we add to pkt_len the headers size of all segments 3736 */ 3737 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3738 u16 gso_segs = shinfo->gso_segs; 3739 unsigned int hdr_len; 3740 3741 /* mac layer + network layer */ 3742 hdr_len = skb_transport_offset(skb); 3743 3744 /* + transport layer */ 3745 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3746 const struct tcphdr *th; 3747 struct tcphdr _tcphdr; 3748 3749 th = skb_header_pointer(skb, hdr_len, 3750 sizeof(_tcphdr), &_tcphdr); 3751 if (likely(th)) 3752 hdr_len += __tcp_hdrlen(th); 3753 } else { 3754 struct udphdr _udphdr; 3755 3756 if (skb_header_pointer(skb, hdr_len, 3757 sizeof(_udphdr), &_udphdr)) 3758 hdr_len += sizeof(struct udphdr); 3759 } 3760 3761 if (shinfo->gso_type & SKB_GSO_DODGY) 3762 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3763 shinfo->gso_size); 3764 3765 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3766 } 3767 } 3768 3769 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3770 struct sk_buff **to_free, 3771 struct netdev_queue *txq) 3772 { 3773 int rc; 3774 3775 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3776 if (rc == NET_XMIT_SUCCESS) 3777 trace_qdisc_enqueue(q, txq, skb); 3778 return rc; 3779 } 3780 3781 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3782 struct net_device *dev, 3783 struct netdev_queue *txq) 3784 { 3785 spinlock_t *root_lock = qdisc_lock(q); 3786 struct sk_buff *to_free = NULL; 3787 bool contended; 3788 int rc; 3789 3790 qdisc_calculate_pkt_len(skb, q); 3791 3792 if (q->flags & TCQ_F_NOLOCK) { 3793 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3794 qdisc_run_begin(q)) { 3795 /* Retest nolock_qdisc_is_empty() within the protection 3796 * of q->seqlock to protect from racing with requeuing. 3797 */ 3798 if (unlikely(!nolock_qdisc_is_empty(q))) { 3799 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3800 __qdisc_run(q); 3801 qdisc_run_end(q); 3802 3803 goto no_lock_out; 3804 } 3805 3806 qdisc_bstats_cpu_update(q, skb); 3807 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3808 !nolock_qdisc_is_empty(q)) 3809 __qdisc_run(q); 3810 3811 qdisc_run_end(q); 3812 return NET_XMIT_SUCCESS; 3813 } 3814 3815 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3816 qdisc_run(q); 3817 3818 no_lock_out: 3819 if (unlikely(to_free)) 3820 kfree_skb_list_reason(to_free, 3821 SKB_DROP_REASON_QDISC_DROP); 3822 return rc; 3823 } 3824 3825 /* 3826 * Heuristic to force contended enqueues to serialize on a 3827 * separate lock before trying to get qdisc main lock. 3828 * This permits qdisc->running owner to get the lock more 3829 * often and dequeue packets faster. 3830 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 3831 * and then other tasks will only enqueue packets. The packets will be 3832 * sent after the qdisc owner is scheduled again. To prevent this 3833 * scenario the task always serialize on the lock. 
3834 */ 3835 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 3836 if (unlikely(contended)) 3837 spin_lock(&q->busylock); 3838 3839 spin_lock(root_lock); 3840 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3841 __qdisc_drop(skb, &to_free); 3842 rc = NET_XMIT_DROP; 3843 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3844 qdisc_run_begin(q)) { 3845 /* 3846 * This is a work-conserving queue; there are no old skbs 3847 * waiting to be sent out; and the qdisc is not running - 3848 * xmit the skb directly. 3849 */ 3850 3851 qdisc_bstats_update(q, skb); 3852 3853 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3854 if (unlikely(contended)) { 3855 spin_unlock(&q->busylock); 3856 contended = false; 3857 } 3858 __qdisc_run(q); 3859 } 3860 3861 qdisc_run_end(q); 3862 rc = NET_XMIT_SUCCESS; 3863 } else { 3864 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3865 if (qdisc_run_begin(q)) { 3866 if (unlikely(contended)) { 3867 spin_unlock(&q->busylock); 3868 contended = false; 3869 } 3870 __qdisc_run(q); 3871 qdisc_run_end(q); 3872 } 3873 } 3874 spin_unlock(root_lock); 3875 if (unlikely(to_free)) 3876 kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP); 3877 if (unlikely(contended)) 3878 spin_unlock(&q->busylock); 3879 return rc; 3880 } 3881 3882 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3883 static void skb_update_prio(struct sk_buff *skb) 3884 { 3885 const struct netprio_map *map; 3886 const struct sock *sk; 3887 unsigned int prioidx; 3888 3889 if (skb->priority) 3890 return; 3891 map = rcu_dereference_bh(skb->dev->priomap); 3892 if (!map) 3893 return; 3894 sk = skb_to_full_sk(skb); 3895 if (!sk) 3896 return; 3897 3898 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3899 3900 if (prioidx < map->priomap_len) 3901 skb->priority = map->priomap[prioidx]; 3902 } 3903 #else 3904 #define skb_update_prio(skb) 3905 #endif 3906 3907 /** 3908 * dev_loopback_xmit - loop back @skb 3909 * @net: network namespace this loopback is happening in 3910 * @sk: sk needed to be a netfilter okfn 3911 * @skb: buffer to transmit 3912 */ 3913 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3914 { 3915 skb_reset_mac_header(skb); 3916 __skb_pull(skb, skb_network_offset(skb)); 3917 skb->pkt_type = PACKET_LOOPBACK; 3918 if (skb->ip_summed == CHECKSUM_NONE) 3919 skb->ip_summed = CHECKSUM_UNNECESSARY; 3920 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb)); 3921 skb_dst_force(skb); 3922 netif_rx(skb); 3923 return 0; 3924 } 3925 EXPORT_SYMBOL(dev_loopback_xmit); 3926 3927 #ifdef CONFIG_NET_EGRESS 3928 static struct sk_buff * 3929 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3930 { 3931 #ifdef CONFIG_NET_CLS_ACT 3932 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3933 struct tcf_result cl_res; 3934 3935 if (!miniq) 3936 return skb; 3937 3938 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. 
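	 * This path is only taken once an egress classifier has been
	 * attached, typically via a clsact qdisc, e.g. (illustrative tc
	 * invocation):
	 *
	 *	tc qdisc add dev eth0 clsact
	 *	tc filter add dev eth0 egress matchall action drop
	 *
	 * after which every frame leaving eth0 is classified here before it
	 * is handed to the device's root qdisc.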
*/ 3939 tc_skb_cb(skb)->mru = 0; 3940 tc_skb_cb(skb)->post_ct = false; 3941 mini_qdisc_bstats_cpu_update(miniq, skb); 3942 3943 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 3944 case TC_ACT_OK: 3945 case TC_ACT_RECLASSIFY: 3946 skb->tc_index = TC_H_MIN(cl_res.classid); 3947 break; 3948 case TC_ACT_SHOT: 3949 mini_qdisc_qstats_cpu_drop(miniq); 3950 *ret = NET_XMIT_DROP; 3951 kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS); 3952 return NULL; 3953 case TC_ACT_STOLEN: 3954 case TC_ACT_QUEUED: 3955 case TC_ACT_TRAP: 3956 *ret = NET_XMIT_SUCCESS; 3957 consume_skb(skb); 3958 return NULL; 3959 case TC_ACT_REDIRECT: 3960 /* No need to push/pop skb's mac_header here on egress! */ 3961 skb_do_redirect(skb); 3962 *ret = NET_XMIT_SUCCESS; 3963 return NULL; 3964 default: 3965 break; 3966 } 3967 #endif /* CONFIG_NET_CLS_ACT */ 3968 3969 return skb; 3970 } 3971 3972 static struct netdev_queue * 3973 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) 3974 { 3975 int qm = skb_get_queue_mapping(skb); 3976 3977 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 3978 } 3979 3980 static bool netdev_xmit_txqueue_skipped(void) 3981 { 3982 return __this_cpu_read(softnet_data.xmit.skip_txqueue); 3983 } 3984 3985 void netdev_xmit_skip_txqueue(bool skip) 3986 { 3987 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 3988 } 3989 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3990 #endif /* CONFIG_NET_EGRESS */ 3991 3992 #ifdef CONFIG_XPS 3993 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3994 struct xps_dev_maps *dev_maps, unsigned int tci) 3995 { 3996 int tc = netdev_get_prio_tc_map(dev, skb->priority); 3997 struct xps_map *map; 3998 int queue_index = -1; 3999 4000 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 4001 return queue_index; 4002 4003 tci *= dev_maps->num_tc; 4004 tci += tc; 4005 4006 map = rcu_dereference(dev_maps->attr_map[tci]); 4007 if (map) { 4008 if (map->len == 1) 4009 queue_index = map->queues[0]; 4010 else 4011 queue_index = map->queues[reciprocal_scale( 4012 skb_get_hash(skb), map->len)]; 4013 if (unlikely(queue_index >= dev->real_num_tx_queues)) 4014 queue_index = -1; 4015 } 4016 return queue_index; 4017 } 4018 #endif 4019 4020 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 4021 struct sk_buff *skb) 4022 { 4023 #ifdef CONFIG_XPS 4024 struct xps_dev_maps *dev_maps; 4025 struct sock *sk = skb->sk; 4026 int queue_index = -1; 4027 4028 if (!static_key_false(&xps_needed)) 4029 return -1; 4030 4031 rcu_read_lock(); 4032 if (!static_key_false(&xps_rxqs_needed)) 4033 goto get_cpus_map; 4034 4035 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4036 if (dev_maps) { 4037 int tci = sk_rx_queue_get(sk); 4038 4039 if (tci >= 0) 4040 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4041 tci); 4042 } 4043 4044 get_cpus_map: 4045 if (queue_index < 0) { 4046 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4047 if (dev_maps) { 4048 unsigned int tci = skb->sender_cpu - 1; 4049 4050 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4051 tci); 4052 } 4053 } 4054 rcu_read_unlock(); 4055 4056 return queue_index; 4057 #else 4058 return -1; 4059 #endif 4060 } 4061 4062 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4063 struct net_device *sb_dev) 4064 { 4065 return 0; 4066 } 4067 EXPORT_SYMBOL(dev_pick_tx_zero); 4068 4069 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 4070 struct net_device *sb_dev) 4071 { 4072 return 
(u16)raw_smp_processor_id() % dev->real_num_tx_queues; 4073 } 4074 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 4075 4076 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4077 struct net_device *sb_dev) 4078 { 4079 struct sock *sk = skb->sk; 4080 int queue_index = sk_tx_queue_get(sk); 4081 4082 sb_dev = sb_dev ? : dev; 4083 4084 if (queue_index < 0 || skb->ooo_okay || 4085 queue_index >= dev->real_num_tx_queues) { 4086 int new_index = get_xps_queue(dev, sb_dev, skb); 4087 4088 if (new_index < 0) 4089 new_index = skb_tx_hash(dev, sb_dev, skb); 4090 4091 if (queue_index != new_index && sk && 4092 sk_fullsock(sk) && 4093 rcu_access_pointer(sk->sk_dst_cache)) 4094 sk_tx_queue_set(sk, new_index); 4095 4096 queue_index = new_index; 4097 } 4098 4099 return queue_index; 4100 } 4101 EXPORT_SYMBOL(netdev_pick_tx); 4102 4103 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 4104 struct sk_buff *skb, 4105 struct net_device *sb_dev) 4106 { 4107 int queue_index = 0; 4108 4109 #ifdef CONFIG_XPS 4110 u32 sender_cpu = skb->sender_cpu - 1; 4111 4112 if (sender_cpu >= (u32)NR_CPUS) 4113 skb->sender_cpu = raw_smp_processor_id() + 1; 4114 #endif 4115 4116 if (dev->real_num_tx_queues != 1) { 4117 const struct net_device_ops *ops = dev->netdev_ops; 4118 4119 if (ops->ndo_select_queue) 4120 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4121 else 4122 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4123 4124 queue_index = netdev_cap_txqueue(dev, queue_index); 4125 } 4126 4127 skb_set_queue_mapping(skb, queue_index); 4128 return netdev_get_tx_queue(dev, queue_index); 4129 } 4130 4131 /** 4132 * __dev_queue_xmit() - transmit a buffer 4133 * @skb: buffer to transmit 4134 * @sb_dev: suboordinate device used for L2 forwarding offload 4135 * 4136 * Queue a buffer for transmission to a network device. The caller must 4137 * have set the device and priority and built the buffer before calling 4138 * this function. The function can be called from an interrupt. 4139 * 4140 * When calling this method, interrupts MUST be enabled. This is because 4141 * the BH enable code must have IRQs enabled so that it will not deadlock. 4142 * 4143 * Regardless of the return value, the skb is consumed, so it is currently 4144 * difficult to retry a send to this method. (You can bump the ref count 4145 * before sending to hold a reference for retry if you are careful.) 4146 * 4147 * Return: 4148 * * 0 - buffer successfully transmitted 4149 * * positive qdisc return code - NET_XMIT_DROP etc. 4150 * * negative errno - other errors 4151 */ 4152 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 4153 { 4154 struct net_device *dev = skb->dev; 4155 struct netdev_queue *txq = NULL; 4156 struct Qdisc *q; 4157 int rc = -ENOMEM; 4158 bool again = false; 4159 4160 skb_reset_mac_header(skb); 4161 skb_assert_len(skb); 4162 4163 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 4164 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); 4165 4166 /* Disable soft irqs for various locks below. Also 4167 * stops preemption for RCU. 
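 * rcu_read_lock_bh() below does both at once: it enters an RCU-bh
 * read-side critical section and disables bottom halves on this CPU.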
4168 */ 4169 rcu_read_lock_bh(); 4170 4171 skb_update_prio(skb); 4172 4173 qdisc_pkt_len_init(skb); 4174 #ifdef CONFIG_NET_CLS_ACT 4175 skb->tc_at_ingress = 0; 4176 #endif 4177 #ifdef CONFIG_NET_EGRESS 4178 if (static_branch_unlikely(&egress_needed_key)) { 4179 if (nf_hook_egress_active()) { 4180 skb = nf_hook_egress(skb, &rc, dev); 4181 if (!skb) 4182 goto out; 4183 } 4184 4185 netdev_xmit_skip_txqueue(false); 4186 4187 nf_skip_egress(skb, true); 4188 skb = sch_handle_egress(skb, &rc, dev); 4189 if (!skb) 4190 goto out; 4191 nf_skip_egress(skb, false); 4192 4193 if (netdev_xmit_txqueue_skipped()) 4194 txq = netdev_tx_queue_mapping(dev, skb); 4195 } 4196 #endif 4197 /* If device/qdisc don't need skb->dst, release it right now while 4198 * its hot in this cpu cache. 4199 */ 4200 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 4201 skb_dst_drop(skb); 4202 else 4203 skb_dst_force(skb); 4204 4205 if (!txq) 4206 txq = netdev_core_pick_tx(dev, skb, sb_dev); 4207 4208 q = rcu_dereference_bh(txq->qdisc); 4209 4210 trace_net_dev_queue(skb); 4211 if (q->enqueue) { 4212 rc = __dev_xmit_skb(skb, q, dev, txq); 4213 goto out; 4214 } 4215 4216 /* The device has no queue. Common case for software devices: 4217 * loopback, all the sorts of tunnels... 4218 4219 * Really, it is unlikely that netif_tx_lock protection is necessary 4220 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 4221 * counters.) 4222 * However, it is possible, that they rely on protection 4223 * made by us here. 4224 4225 * Check this and shot the lock. It is not prone from deadlocks. 4226 *Either shot noqueue qdisc, it is even simpler 8) 4227 */ 4228 if (dev->flags & IFF_UP) { 4229 int cpu = smp_processor_id(); /* ok because BHs are off */ 4230 4231 /* Other cpus might concurrently change txq->xmit_lock_owner 4232 * to -1 or to their cpu id, but not to our id. 4233 */ 4234 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { 4235 if (dev_xmit_recursion()) 4236 goto recursion_alert; 4237 4238 skb = validate_xmit_skb(skb, dev, &again); 4239 if (!skb) 4240 goto out; 4241 4242 HARD_TX_LOCK(dev, txq, cpu); 4243 4244 if (!netif_xmit_stopped(txq)) { 4245 dev_xmit_recursion_inc(); 4246 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4247 dev_xmit_recursion_dec(); 4248 if (dev_xmit_complete(rc)) { 4249 HARD_TX_UNLOCK(dev, txq); 4250 goto out; 4251 } 4252 } 4253 HARD_TX_UNLOCK(dev, txq); 4254 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4255 dev->name); 4256 } else { 4257 /* Recursion is detected! 
It is possible, 4258 * unfortunately 4259 */ 4260 recursion_alert: 4261 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4262 dev->name); 4263 } 4264 } 4265 4266 rc = -ENETDOWN; 4267 rcu_read_unlock_bh(); 4268 4269 dev_core_stats_tx_dropped_inc(dev); 4270 kfree_skb_list(skb); 4271 return rc; 4272 out: 4273 rcu_read_unlock_bh(); 4274 return rc; 4275 } 4276 EXPORT_SYMBOL(__dev_queue_xmit); 4277 4278 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4279 { 4280 struct net_device *dev = skb->dev; 4281 struct sk_buff *orig_skb = skb; 4282 struct netdev_queue *txq; 4283 int ret = NETDEV_TX_BUSY; 4284 bool again = false; 4285 4286 if (unlikely(!netif_running(dev) || 4287 !netif_carrier_ok(dev))) 4288 goto drop; 4289 4290 skb = validate_xmit_skb_list(skb, dev, &again); 4291 if (skb != orig_skb) 4292 goto drop; 4293 4294 skb_set_queue_mapping(skb, queue_id); 4295 txq = skb_get_tx_queue(dev, skb); 4296 4297 local_bh_disable(); 4298 4299 dev_xmit_recursion_inc(); 4300 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4301 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4302 ret = netdev_start_xmit(skb, dev, txq, false); 4303 HARD_TX_UNLOCK(dev, txq); 4304 dev_xmit_recursion_dec(); 4305 4306 local_bh_enable(); 4307 return ret; 4308 drop: 4309 dev_core_stats_tx_dropped_inc(dev); 4310 kfree_skb_list(skb); 4311 return NET_XMIT_DROP; 4312 } 4313 EXPORT_SYMBOL(__dev_direct_xmit); 4314 4315 /************************************************************************* 4316 * Receiver routines 4317 *************************************************************************/ 4318 4319 int netdev_max_backlog __read_mostly = 1000; 4320 EXPORT_SYMBOL(netdev_max_backlog); 4321 4322 int netdev_tstamp_prequeue __read_mostly = 1; 4323 unsigned int sysctl_skb_defer_max __read_mostly = 64; 4324 int netdev_budget __read_mostly = 300; 4325 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4326 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4327 int weight_p __read_mostly = 64; /* old backlog weight */ 4328 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4329 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4330 int dev_rx_weight __read_mostly = 64; 4331 int dev_tx_weight __read_mostly = 64; 4332 4333 /* Called with irq disabled */ 4334 static inline void ____napi_schedule(struct softnet_data *sd, 4335 struct napi_struct *napi) 4336 { 4337 struct task_struct *thread; 4338 4339 lockdep_assert_irqs_disabled(); 4340 4341 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4342 /* Paired with smp_mb__before_atomic() in 4343 * napi_enable()/dev_set_threaded(). 4344 * Use READ_ONCE() to guarantee a complete 4345 * read on napi->thread. Only call 4346 * wake_up_process() when it's not NULL. 4347 */ 4348 thread = READ_ONCE(napi->thread); 4349 if (thread) { 4350 /* Avoid doing set_bit() if the thread is in 4351 * INTERRUPTIBLE state, cause napi_thread_wait() 4352 * makes sure to proceed with napi polling 4353 * if the thread is explicitly woken from here. 4354 */ 4355 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE) 4356 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4357 wake_up_process(thread); 4358 return; 4359 } 4360 } 4361 4362 list_add_tail(&napi->poll_list, &sd->poll_list); 4363 /* If not called from net_rx_action() 4364 * we have to raise NET_RX_SOFTIRQ. 
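 * When net_rx_action() is the caller (sd->in_net_rx_action is set),
 * the softirq is already running and will see the new poll_list entry,
 * so there is nothing to raise.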
4365 */ 4366 if (!sd->in_net_rx_action) 4367 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4368 } 4369 4370 #ifdef CONFIG_RPS 4371 4372 /* One global table that all flow-based protocols share. */ 4373 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4374 EXPORT_SYMBOL(rps_sock_flow_table); 4375 u32 rps_cpu_mask __read_mostly; 4376 EXPORT_SYMBOL(rps_cpu_mask); 4377 4378 struct static_key_false rps_needed __read_mostly; 4379 EXPORT_SYMBOL(rps_needed); 4380 struct static_key_false rfs_needed __read_mostly; 4381 EXPORT_SYMBOL(rfs_needed); 4382 4383 static struct rps_dev_flow * 4384 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4385 struct rps_dev_flow *rflow, u16 next_cpu) 4386 { 4387 if (next_cpu < nr_cpu_ids) { 4388 #ifdef CONFIG_RFS_ACCEL 4389 struct netdev_rx_queue *rxqueue; 4390 struct rps_dev_flow_table *flow_table; 4391 struct rps_dev_flow *old_rflow; 4392 u32 flow_id; 4393 u16 rxq_index; 4394 int rc; 4395 4396 /* Should we steer this flow to a different hardware queue? */ 4397 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4398 !(dev->features & NETIF_F_NTUPLE)) 4399 goto out; 4400 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4401 if (rxq_index == skb_get_rx_queue(skb)) 4402 goto out; 4403 4404 rxqueue = dev->_rx + rxq_index; 4405 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4406 if (!flow_table) 4407 goto out; 4408 flow_id = skb_get_hash(skb) & flow_table->mask; 4409 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4410 rxq_index, flow_id); 4411 if (rc < 0) 4412 goto out; 4413 old_rflow = rflow; 4414 rflow = &flow_table->flows[flow_id]; 4415 rflow->filter = rc; 4416 if (old_rflow->filter == rflow->filter) 4417 old_rflow->filter = RPS_NO_FILTER; 4418 out: 4419 #endif 4420 rflow->last_qtail = 4421 per_cpu(softnet_data, next_cpu).input_queue_head; 4422 } 4423 4424 rflow->cpu = next_cpu; 4425 return rflow; 4426 } 4427 4428 /* 4429 * get_rps_cpu is called from netif_receive_skb and returns the target 4430 * CPU from the RPS map of the receiving queue for a given skb. 4431 * rcu_read_lock must be held on entry. 
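 * Returns the target CPU id, or -1 if there is no usable RPS/RFS
 * mapping; callers then fall back to the local CPU (see
 * netif_rx_internal() and netif_receive_skb_internal()).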
4432 */ 4433 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4434 struct rps_dev_flow **rflowp) 4435 { 4436 const struct rps_sock_flow_table *sock_flow_table; 4437 struct netdev_rx_queue *rxqueue = dev->_rx; 4438 struct rps_dev_flow_table *flow_table; 4439 struct rps_map *map; 4440 int cpu = -1; 4441 u32 tcpu; 4442 u32 hash; 4443 4444 if (skb_rx_queue_recorded(skb)) { 4445 u16 index = skb_get_rx_queue(skb); 4446 4447 if (unlikely(index >= dev->real_num_rx_queues)) { 4448 WARN_ONCE(dev->real_num_rx_queues > 1, 4449 "%s received packet on queue %u, but number " 4450 "of RX queues is %u\n", 4451 dev->name, index, dev->real_num_rx_queues); 4452 goto done; 4453 } 4454 rxqueue += index; 4455 } 4456 4457 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4458 4459 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4460 map = rcu_dereference(rxqueue->rps_map); 4461 if (!flow_table && !map) 4462 goto done; 4463 4464 skb_reset_network_header(skb); 4465 hash = skb_get_hash(skb); 4466 if (!hash) 4467 goto done; 4468 4469 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4470 if (flow_table && sock_flow_table) { 4471 struct rps_dev_flow *rflow; 4472 u32 next_cpu; 4473 u32 ident; 4474 4475 /* First check into global flow table if there is a match */ 4476 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4477 if ((ident ^ hash) & ~rps_cpu_mask) 4478 goto try_rps; 4479 4480 next_cpu = ident & rps_cpu_mask; 4481 4482 /* OK, now we know there is a match, 4483 * we can look at the local (per receive queue) flow table 4484 */ 4485 rflow = &flow_table->flows[hash & flow_table->mask]; 4486 tcpu = rflow->cpu; 4487 4488 /* 4489 * If the desired CPU (where last recvmsg was done) is 4490 * different from current CPU (one in the rx-queue flow 4491 * table entry), switch if one of the following holds: 4492 * - Current CPU is unset (>= nr_cpu_ids). 4493 * - Current CPU is offline. 4494 * - The current CPU's queue tail has advanced beyond the 4495 * last packet that was enqueued using this table entry. 4496 * This guarantees that all previous packets for the flow 4497 * have been dequeued, thus preserving in order delivery. 4498 */ 4499 if (unlikely(tcpu != next_cpu) && 4500 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4501 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4502 rflow->last_qtail)) >= 0)) { 4503 tcpu = next_cpu; 4504 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4505 } 4506 4507 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4508 *rflowp = rflow; 4509 cpu = tcpu; 4510 goto done; 4511 } 4512 } 4513 4514 try_rps: 4515 4516 if (map) { 4517 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4518 if (cpu_online(tcpu)) { 4519 cpu = tcpu; 4520 goto done; 4521 } 4522 } 4523 4524 done: 4525 return cpu; 4526 } 4527 4528 #ifdef CONFIG_RFS_ACCEL 4529 4530 /** 4531 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4532 * @dev: Device on which the filter was set 4533 * @rxq_index: RX queue index 4534 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4535 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4536 * 4537 * Drivers that implement ndo_rx_flow_steer() should periodically call 4538 * this function for each installed filter and remove the filters for 4539 * which it returns %true. 
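 *
 * A minimal expiry scan could look like the sketch below; the my_priv
 * fields and my_remove_filter() are illustrative placeholders, not
 * kernel API:
 *
 *	for (i = 0; i < my_priv->filter_cnt; i++)
 *		if (rps_may_expire_flow(netdev, my_priv->rxq_of[i],
 *					my_priv->flow_id[i], i))
 *			my_remove_filter(my_priv, i);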
4540 */ 4541 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4542 u32 flow_id, u16 filter_id) 4543 { 4544 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4545 struct rps_dev_flow_table *flow_table; 4546 struct rps_dev_flow *rflow; 4547 bool expire = true; 4548 unsigned int cpu; 4549 4550 rcu_read_lock(); 4551 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4552 if (flow_table && flow_id <= flow_table->mask) { 4553 rflow = &flow_table->flows[flow_id]; 4554 cpu = READ_ONCE(rflow->cpu); 4555 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4556 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4557 rflow->last_qtail) < 4558 (int)(10 * flow_table->mask))) 4559 expire = false; 4560 } 4561 rcu_read_unlock(); 4562 return expire; 4563 } 4564 EXPORT_SYMBOL(rps_may_expire_flow); 4565 4566 #endif /* CONFIG_RFS_ACCEL */ 4567 4568 /* Called from hardirq (IPI) context */ 4569 static void rps_trigger_softirq(void *data) 4570 { 4571 struct softnet_data *sd = data; 4572 4573 ____napi_schedule(sd, &sd->backlog); 4574 sd->received_rps++; 4575 } 4576 4577 #endif /* CONFIG_RPS */ 4578 4579 /* Called from hardirq (IPI) context */ 4580 static void trigger_rx_softirq(void *data) 4581 { 4582 struct softnet_data *sd = data; 4583 4584 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4585 smp_store_release(&sd->defer_ipi_scheduled, 0); 4586 } 4587 4588 /* 4589 * After we queued a packet into sd->input_pkt_queue, 4590 * we need to make sure this queue is serviced soon. 4591 * 4592 * - If this is another cpu queue, link it to our rps_ipi_list, 4593 * and make sure we will process rps_ipi_list from net_rx_action(). 4594 * 4595 * - If this is our own queue, NAPI schedule our backlog. 4596 * Note that this also raises NET_RX_SOFTIRQ. 4597 */ 4598 static void napi_schedule_rps(struct softnet_data *sd) 4599 { 4600 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4601 4602 #ifdef CONFIG_RPS 4603 if (sd != mysd) { 4604 sd->rps_ipi_next = mysd->rps_ipi_list; 4605 mysd->rps_ipi_list = sd; 4606 4607 /* If not called from net_rx_action() 4608 * we have to raise NET_RX_SOFTIRQ. 4609 */ 4610 if (!mysd->in_net_rx_action) 4611 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4612 return; 4613 } 4614 #endif /* CONFIG_RPS */ 4615 __napi_schedule_irqoff(&mysd->backlog); 4616 } 4617 4618 #ifdef CONFIG_NET_FLOW_LIMIT 4619 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4620 #endif 4621 4622 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4623 { 4624 #ifdef CONFIG_NET_FLOW_LIMIT 4625 struct sd_flow_limit *fl; 4626 struct softnet_data *sd; 4627 unsigned int old_flow, new_flow; 4628 4629 if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) 4630 return false; 4631 4632 sd = this_cpu_ptr(&softnet_data); 4633 4634 rcu_read_lock(); 4635 fl = rcu_dereference(sd->flow_limit); 4636 if (fl) { 4637 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4638 old_flow = fl->history[fl->history_head]; 4639 fl->history[fl->history_head] = new_flow; 4640 4641 fl->history_head++; 4642 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4643 4644 if (likely(fl->buckets[old_flow])) 4645 fl->buckets[old_flow]--; 4646 4647 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4648 fl->count++; 4649 rcu_read_unlock(); 4650 return true; 4651 } 4652 } 4653 rcu_read_unlock(); 4654 #endif 4655 return false; 4656 } 4657 4658 /* 4659 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4660 * queue (may be a remote CPU queue). 
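 * Returns NET_RX_SUCCESS if the skb was queued, or NET_RX_DROP if it
 * was dropped because the device is not running, the backlog is full,
 * or the flow limit was hit.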
4661 */ 4662 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4663 unsigned int *qtail) 4664 { 4665 enum skb_drop_reason reason; 4666 struct softnet_data *sd; 4667 unsigned long flags; 4668 unsigned int qlen; 4669 4670 reason = SKB_DROP_REASON_NOT_SPECIFIED; 4671 sd = &per_cpu(softnet_data, cpu); 4672 4673 rps_lock_irqsave(sd, &flags); 4674 if (!netif_running(skb->dev)) 4675 goto drop; 4676 qlen = skb_queue_len(&sd->input_pkt_queue); 4677 if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { 4678 if (qlen) { 4679 enqueue: 4680 __skb_queue_tail(&sd->input_pkt_queue, skb); 4681 input_queue_tail_incr_save(sd, qtail); 4682 rps_unlock_irq_restore(sd, &flags); 4683 return NET_RX_SUCCESS; 4684 } 4685 4686 /* Schedule NAPI for backlog device 4687 * We can use non atomic operation since we own the queue lock 4688 */ 4689 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 4690 napi_schedule_rps(sd); 4691 goto enqueue; 4692 } 4693 reason = SKB_DROP_REASON_CPU_BACKLOG; 4694 4695 drop: 4696 sd->dropped++; 4697 rps_unlock_irq_restore(sd, &flags); 4698 4699 dev_core_stats_rx_dropped_inc(skb->dev); 4700 kfree_skb_reason(skb, reason); 4701 return NET_RX_DROP; 4702 } 4703 4704 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4705 { 4706 struct net_device *dev = skb->dev; 4707 struct netdev_rx_queue *rxqueue; 4708 4709 rxqueue = dev->_rx; 4710 4711 if (skb_rx_queue_recorded(skb)) { 4712 u16 index = skb_get_rx_queue(skb); 4713 4714 if (unlikely(index >= dev->real_num_rx_queues)) { 4715 WARN_ONCE(dev->real_num_rx_queues > 1, 4716 "%s received packet on queue %u, but number " 4717 "of RX queues is %u\n", 4718 dev->name, index, dev->real_num_rx_queues); 4719 4720 return rxqueue; /* Return first rxqueue */ 4721 } 4722 rxqueue += index; 4723 } 4724 return rxqueue; 4725 } 4726 4727 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4728 struct bpf_prog *xdp_prog) 4729 { 4730 void *orig_data, *orig_data_end, *hard_start; 4731 struct netdev_rx_queue *rxqueue; 4732 bool orig_bcast, orig_host; 4733 u32 mac_len, frame_sz; 4734 __be16 orig_eth_type; 4735 struct ethhdr *eth; 4736 u32 metalen, act; 4737 int off; 4738 4739 /* The XDP program wants to see the packet starting at the MAC 4740 * header. 
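 * Below, mac_len is how far skb->data has been pulled past the MAC
 * header, so xdp_prepare_buff() rewinds by that amount to expose the
 * Ethernet header to the program.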
4741 */ 4742 mac_len = skb->data - skb_mac_header(skb); 4743 hard_start = skb->data - skb_headroom(skb); 4744 4745 /* SKB "head" area always have tailroom for skb_shared_info */ 4746 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4747 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4748 4749 rxqueue = netif_get_rxqueue(skb); 4750 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4751 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4752 skb_headlen(skb) + mac_len, true); 4753 4754 orig_data_end = xdp->data_end; 4755 orig_data = xdp->data; 4756 eth = (struct ethhdr *)xdp->data; 4757 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4758 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4759 orig_eth_type = eth->h_proto; 4760 4761 act = bpf_prog_run_xdp(xdp_prog, xdp); 4762 4763 /* check if bpf_xdp_adjust_head was used */ 4764 off = xdp->data - orig_data; 4765 if (off) { 4766 if (off > 0) 4767 __skb_pull(skb, off); 4768 else if (off < 0) 4769 __skb_push(skb, -off); 4770 4771 skb->mac_header += off; 4772 skb_reset_network_header(skb); 4773 } 4774 4775 /* check if bpf_xdp_adjust_tail was used */ 4776 off = xdp->data_end - orig_data_end; 4777 if (off != 0) { 4778 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4779 skb->len += off; /* positive on grow, negative on shrink */ 4780 } 4781 4782 /* check if XDP changed eth hdr such SKB needs update */ 4783 eth = (struct ethhdr *)xdp->data; 4784 if ((orig_eth_type != eth->h_proto) || 4785 (orig_host != ether_addr_equal_64bits(eth->h_dest, 4786 skb->dev->dev_addr)) || 4787 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4788 __skb_push(skb, ETH_HLEN); 4789 skb->pkt_type = PACKET_HOST; 4790 skb->protocol = eth_type_trans(skb, skb->dev); 4791 } 4792 4793 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4794 * before calling us again on redirect path. We do not call do_redirect 4795 * as we leave that up to the caller. 4796 * 4797 * Caller is responsible for managing lifetime of skb (i.e. calling 4798 * kfree_skb in response to actions it cannot handle/XDP_DROP). 4799 */ 4800 switch (act) { 4801 case XDP_REDIRECT: 4802 case XDP_TX: 4803 __skb_push(skb, mac_len); 4804 break; 4805 case XDP_PASS: 4806 metalen = xdp->data - xdp->data_meta; 4807 if (metalen) 4808 skb_metadata_set(skb, metalen); 4809 break; 4810 } 4811 4812 return act; 4813 } 4814 4815 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4816 struct xdp_buff *xdp, 4817 struct bpf_prog *xdp_prog) 4818 { 4819 u32 act = XDP_DROP; 4820 4821 /* Reinjected packets coming from act_mirred or similar should 4822 * not get XDP generic processing. 4823 */ 4824 if (skb_is_redirected(skb)) 4825 return XDP_PASS; 4826 4827 /* XDP packets must be linear and must have sufficient headroom 4828 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4829 * native XDP provides, thus we need to do it here as well. 4830 */ 4831 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4832 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4833 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4834 int troom = skb->tail + skb->data_len - skb->end; 4835 4836 /* In case we have to go down the path and also linearize, 4837 * then lets do the pskb_expand_head() work just once here. 4838 */ 4839 if (pskb_expand_head(skb, 4840 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4841 troom > 0 ? 
troom + 128 : 0, GFP_ATOMIC)) 4842 goto do_drop; 4843 if (skb_linearize(skb)) 4844 goto do_drop; 4845 } 4846 4847 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4848 switch (act) { 4849 case XDP_REDIRECT: 4850 case XDP_TX: 4851 case XDP_PASS: 4852 break; 4853 default: 4854 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act); 4855 fallthrough; 4856 case XDP_ABORTED: 4857 trace_xdp_exception(skb->dev, xdp_prog, act); 4858 fallthrough; 4859 case XDP_DROP: 4860 do_drop: 4861 kfree_skb(skb); 4862 break; 4863 } 4864 4865 return act; 4866 } 4867 4868 /* When doing generic XDP we have to bypass the qdisc layer and the 4869 * network taps in order to match in-driver-XDP behavior. This also means 4870 * that XDP packets are able to starve other packets going through a qdisc, 4871 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX 4872 * queues, so they do not have this starvation issue. 4873 */ 4874 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4875 { 4876 struct net_device *dev = skb->dev; 4877 struct netdev_queue *txq; 4878 bool free_skb = true; 4879 int cpu, rc; 4880 4881 txq = netdev_core_pick_tx(dev, skb, NULL); 4882 cpu = smp_processor_id(); 4883 HARD_TX_LOCK(dev, txq, cpu); 4884 if (!netif_xmit_frozen_or_drv_stopped(txq)) { 4885 rc = netdev_start_xmit(skb, dev, txq, 0); 4886 if (dev_xmit_complete(rc)) 4887 free_skb = false; 4888 } 4889 HARD_TX_UNLOCK(dev, txq); 4890 if (free_skb) { 4891 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4892 dev_core_stats_tx_dropped_inc(dev); 4893 kfree_skb(skb); 4894 } 4895 } 4896 4897 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4898 4899 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4900 { 4901 if (xdp_prog) { 4902 struct xdp_buff xdp; 4903 u32 act; 4904 int err; 4905 4906 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4907 if (act != XDP_PASS) { 4908 switch (act) { 4909 case XDP_REDIRECT: 4910 err = xdp_do_generic_redirect(skb->dev, skb, 4911 &xdp, xdp_prog); 4912 if (err) 4913 goto out_redir; 4914 break; 4915 case XDP_TX: 4916 generic_xdp_tx(skb, xdp_prog); 4917 break; 4918 } 4919 return XDP_DROP; 4920 } 4921 } 4922 return XDP_PASS; 4923 out_redir: 4924 kfree_skb_reason(skb, SKB_DROP_REASON_XDP); 4925 return XDP_DROP; 4926 } 4927 EXPORT_SYMBOL_GPL(do_xdp_generic); 4928 4929 static int netif_rx_internal(struct sk_buff *skb) 4930 { 4931 int ret; 4932 4933 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 4934 4935 trace_netif_rx(skb); 4936 4937 #ifdef CONFIG_RPS 4938 if (static_branch_unlikely(&rps_needed)) { 4939 struct rps_dev_flow voidflow, *rflow = &voidflow; 4940 int cpu; 4941 4942 rcu_read_lock(); 4943 4944 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4945 if (cpu < 0) 4946 cpu = smp_processor_id(); 4947 4948 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4949 4950 rcu_read_unlock(); 4951 } else 4952 #endif 4953 { 4954 unsigned int qtail; 4955 4956 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 4957 } 4958 return ret; 4959 } 4960 4961 /** 4962 * __netif_rx - Slightly optimized version of netif_rx 4963 * @skb: buffer to post 4964 * 4965 * This behaves as netif_rx except that it does not disable bottom halves. 4966 * As a result this function may only be invoked from the interrupt context 4967 * (either hard or soft interrupt). 
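 *
 * Return values match netif_rx(): NET_RX_SUCCESS (no congestion) or
 * NET_RX_DROP (packet was dropped).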
4968 */ 4969 int __netif_rx(struct sk_buff *skb) 4970 { 4971 int ret; 4972 4973 lockdep_assert_once(hardirq_count() | softirq_count()); 4974 4975 trace_netif_rx_entry(skb); 4976 ret = netif_rx_internal(skb); 4977 trace_netif_rx_exit(ret); 4978 return ret; 4979 } 4980 EXPORT_SYMBOL(__netif_rx); 4981 4982 /** 4983 * netif_rx - post buffer to the network code 4984 * @skb: buffer to post 4985 * 4986 * This function receives a packet from a device driver and queues it for 4987 * the upper (protocol) levels to process via the backlog NAPI device. It 4988 * always succeeds. The buffer may be dropped during processing for 4989 * congestion control or by the protocol layers. 4990 * The network buffer is passed via the backlog NAPI device. Modern NIC 4991 * driver should use NAPI and GRO. 4992 * This function can used from interrupt and from process context. The 4993 * caller from process context must not disable interrupts before invoking 4994 * this function. 4995 * 4996 * return values: 4997 * NET_RX_SUCCESS (no congestion) 4998 * NET_RX_DROP (packet was dropped) 4999 * 5000 */ 5001 int netif_rx(struct sk_buff *skb) 5002 { 5003 bool need_bh_off = !(hardirq_count() | softirq_count()); 5004 int ret; 5005 5006 if (need_bh_off) 5007 local_bh_disable(); 5008 trace_netif_rx_entry(skb); 5009 ret = netif_rx_internal(skb); 5010 trace_netif_rx_exit(ret); 5011 if (need_bh_off) 5012 local_bh_enable(); 5013 return ret; 5014 } 5015 EXPORT_SYMBOL(netif_rx); 5016 5017 static __latent_entropy void net_tx_action(struct softirq_action *h) 5018 { 5019 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 5020 5021 if (sd->completion_queue) { 5022 struct sk_buff *clist; 5023 5024 local_irq_disable(); 5025 clist = sd->completion_queue; 5026 sd->completion_queue = NULL; 5027 local_irq_enable(); 5028 5029 while (clist) { 5030 struct sk_buff *skb = clist; 5031 5032 clist = clist->next; 5033 5034 WARN_ON(refcount_read(&skb->users)); 5035 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED)) 5036 trace_consume_skb(skb, net_tx_action); 5037 else 5038 trace_kfree_skb(skb, net_tx_action, 5039 get_kfree_skb_cb(skb)->reason); 5040 5041 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 5042 __kfree_skb(skb); 5043 else 5044 __kfree_skb_defer(skb); 5045 } 5046 } 5047 5048 if (sd->output_queue) { 5049 struct Qdisc *head; 5050 5051 local_irq_disable(); 5052 head = sd->output_queue; 5053 sd->output_queue = NULL; 5054 sd->output_queue_tailp = &sd->output_queue; 5055 local_irq_enable(); 5056 5057 rcu_read_lock(); 5058 5059 while (head) { 5060 struct Qdisc *q = head; 5061 spinlock_t *root_lock = NULL; 5062 5063 head = head->next_sched; 5064 5065 /* We need to make sure head->next_sched is read 5066 * before clearing __QDISC_STATE_SCHED 5067 */ 5068 smp_mb__before_atomic(); 5069 5070 if (!(q->flags & TCQ_F_NOLOCK)) { 5071 root_lock = qdisc_lock(q); 5072 spin_lock(root_lock); 5073 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, 5074 &q->state))) { 5075 /* There is a synchronize_net() between 5076 * STATE_DEACTIVATED flag being set and 5077 * qdisc_reset()/some_qdisc_is_busy() in 5078 * dev_deactivate(), so we can safely bail out 5079 * early here to avoid data race between 5080 * qdisc_deactivate() and some_qdisc_is_busy() 5081 * for lockless qdisc. 
5082 */ 5083 clear_bit(__QDISC_STATE_SCHED, &q->state); 5084 continue; 5085 } 5086 5087 clear_bit(__QDISC_STATE_SCHED, &q->state); 5088 qdisc_run(q); 5089 if (root_lock) 5090 spin_unlock(root_lock); 5091 } 5092 5093 rcu_read_unlock(); 5094 } 5095 5096 xfrm_dev_backlog(sd); 5097 } 5098 5099 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 5100 /* This hook is defined here for ATM LANE */ 5101 int (*br_fdb_test_addr_hook)(struct net_device *dev, 5102 unsigned char *addr) __read_mostly; 5103 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 5104 #endif 5105 5106 static inline struct sk_buff * 5107 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 5108 struct net_device *orig_dev, bool *another) 5109 { 5110 #ifdef CONFIG_NET_CLS_ACT 5111 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 5112 struct tcf_result cl_res; 5113 5114 /* If there's at least one ingress present somewhere (so 5115 * we get here via enabled static key), remaining devices 5116 * that are not configured with an ingress qdisc will bail 5117 * out here. 5118 */ 5119 if (!miniq) 5120 return skb; 5121 5122 if (*pt_prev) { 5123 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5124 *pt_prev = NULL; 5125 } 5126 5127 qdisc_skb_cb(skb)->pkt_len = skb->len; 5128 tc_skb_cb(skb)->mru = 0; 5129 tc_skb_cb(skb)->post_ct = false; 5130 skb->tc_at_ingress = 1; 5131 mini_qdisc_bstats_cpu_update(miniq, skb); 5132 5133 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 5134 case TC_ACT_OK: 5135 case TC_ACT_RECLASSIFY: 5136 skb->tc_index = TC_H_MIN(cl_res.classid); 5137 break; 5138 case TC_ACT_SHOT: 5139 mini_qdisc_qstats_cpu_drop(miniq); 5140 kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS); 5141 *ret = NET_RX_DROP; 5142 return NULL; 5143 case TC_ACT_STOLEN: 5144 case TC_ACT_QUEUED: 5145 case TC_ACT_TRAP: 5146 consume_skb(skb); 5147 *ret = NET_RX_SUCCESS; 5148 return NULL; 5149 case TC_ACT_REDIRECT: 5150 /* skb_mac_header check was done by cls/act_bpf, so 5151 * we can safely push the L2 header back before 5152 * redirecting to another netdev 5153 */ 5154 __skb_push(skb, skb->mac_len); 5155 if (skb_do_redirect(skb) == -EAGAIN) { 5156 __skb_pull(skb, skb->mac_len); 5157 *another = true; 5158 break; 5159 } 5160 *ret = NET_RX_SUCCESS; 5161 return NULL; 5162 case TC_ACT_CONSUMED: 5163 *ret = NET_RX_SUCCESS; 5164 return NULL; 5165 default: 5166 break; 5167 } 5168 #endif /* CONFIG_NET_CLS_ACT */ 5169 return skb; 5170 } 5171 5172 /** 5173 * netdev_is_rx_handler_busy - check if receive handler is registered 5174 * @dev: device to check 5175 * 5176 * Check if a receive handler is already registered for a given device. 5177 * Return true if there one. 5178 * 5179 * The caller must hold the rtnl_mutex. 5180 */ 5181 bool netdev_is_rx_handler_busy(struct net_device *dev) 5182 { 5183 ASSERT_RTNL(); 5184 return dev && rtnl_dereference(dev->rx_handler); 5185 } 5186 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5187 5188 /** 5189 * netdev_rx_handler_register - register receive handler 5190 * @dev: device to register a handler for 5191 * @rx_handler: receive handler to register 5192 * @rx_handler_data: data pointer that is used by rx handler 5193 * 5194 * Register a receive handler for a device. This handler will then be 5195 * called from __netif_receive_skb. A negative errno code is returned 5196 * on a failure. 5197 * 5198 * The caller must hold the rtnl_mutex. 5199 * 5200 * For a general description of rx_handler, see enum rx_handler_result. 
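 *
 * A stacked device typically registers its handler while enslaving a
 * port, e.g. (sketch; the myvirt_* names are illustrative, not from
 * this file):
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(port_dev, myvirt_handle_frame,
 *					 port_priv);
 *	if (err)
 *		return err;
 *
 * and undoes it with netdev_rx_handler_unregister(port_dev) on
 * teardown.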
5201 */ 5202 int netdev_rx_handler_register(struct net_device *dev, 5203 rx_handler_func_t *rx_handler, 5204 void *rx_handler_data) 5205 { 5206 if (netdev_is_rx_handler_busy(dev)) 5207 return -EBUSY; 5208 5209 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5210 return -EINVAL; 5211 5212 /* Note: rx_handler_data must be set before rx_handler */ 5213 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5214 rcu_assign_pointer(dev->rx_handler, rx_handler); 5215 5216 return 0; 5217 } 5218 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5219 5220 /** 5221 * netdev_rx_handler_unregister - unregister receive handler 5222 * @dev: device to unregister a handler from 5223 * 5224 * Unregister a receive handler from a device. 5225 * 5226 * The caller must hold the rtnl_mutex. 5227 */ 5228 void netdev_rx_handler_unregister(struct net_device *dev) 5229 { 5230 5231 ASSERT_RTNL(); 5232 RCU_INIT_POINTER(dev->rx_handler, NULL); 5233 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5234 * section has a guarantee to see a non NULL rx_handler_data 5235 * as well. 5236 */ 5237 synchronize_net(); 5238 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5239 } 5240 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5241 5242 /* 5243 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5244 * the special handling of PFMEMALLOC skbs. 5245 */ 5246 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5247 { 5248 switch (skb->protocol) { 5249 case htons(ETH_P_ARP): 5250 case htons(ETH_P_IP): 5251 case htons(ETH_P_IPV6): 5252 case htons(ETH_P_8021Q): 5253 case htons(ETH_P_8021AD): 5254 return true; 5255 default: 5256 return false; 5257 } 5258 } 5259 5260 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5261 int *ret, struct net_device *orig_dev) 5262 { 5263 if (nf_hook_ingress_active(skb)) { 5264 int ingress_retval; 5265 5266 if (*pt_prev) { 5267 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5268 *pt_prev = NULL; 5269 } 5270 5271 rcu_read_lock(); 5272 ingress_retval = nf_hook_ingress(skb); 5273 rcu_read_unlock(); 5274 return ingress_retval; 5275 } 5276 return 0; 5277 } 5278 5279 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5280 struct packet_type **ppt_prev) 5281 { 5282 struct packet_type *ptype, *pt_prev; 5283 rx_handler_func_t *rx_handler; 5284 struct sk_buff *skb = *pskb; 5285 struct net_device *orig_dev; 5286 bool deliver_exact = false; 5287 int ret = NET_RX_DROP; 5288 __be16 type; 5289 5290 net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb); 5291 5292 trace_netif_receive_skb(skb); 5293 5294 orig_dev = skb->dev; 5295 5296 skb_reset_network_header(skb); 5297 if (!skb_transport_header_was_set(skb)) 5298 skb_reset_transport_header(skb); 5299 skb_reset_mac_len(skb); 5300 5301 pt_prev = NULL; 5302 5303 another_round: 5304 skb->skb_iif = skb->dev->ifindex; 5305 5306 __this_cpu_inc(softnet_data.processed); 5307 5308 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5309 int ret2; 5310 5311 migrate_disable(); 5312 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5313 migrate_enable(); 5314 5315 if (ret2 != XDP_PASS) { 5316 ret = NET_RX_DROP; 5317 goto out; 5318 } 5319 } 5320 5321 if (eth_type_vlan(skb->protocol)) { 5322 skb = skb_vlan_untag(skb); 5323 if (unlikely(!skb)) 5324 goto out; 5325 } 5326 5327 if (skb_skip_tc_classify(skb)) 5328 goto skip_classify; 5329 5330 if (pfmemalloc) 5331 goto skip_taps; 5332 5333 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5334 if (pt_prev) 5335 ret = 
deliver_skb(skb, pt_prev, orig_dev); 5336 pt_prev = ptype; 5337 } 5338 5339 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5340 if (pt_prev) 5341 ret = deliver_skb(skb, pt_prev, orig_dev); 5342 pt_prev = ptype; 5343 } 5344 5345 skip_taps: 5346 #ifdef CONFIG_NET_INGRESS 5347 if (static_branch_unlikely(&ingress_needed_key)) { 5348 bool another = false; 5349 5350 nf_skip_egress(skb, true); 5351 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5352 &another); 5353 if (another) 5354 goto another_round; 5355 if (!skb) 5356 goto out; 5357 5358 nf_skip_egress(skb, false); 5359 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5360 goto out; 5361 } 5362 #endif 5363 skb_reset_redirect(skb); 5364 skip_classify: 5365 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5366 goto drop; 5367 5368 if (skb_vlan_tag_present(skb)) { 5369 if (pt_prev) { 5370 ret = deliver_skb(skb, pt_prev, orig_dev); 5371 pt_prev = NULL; 5372 } 5373 if (vlan_do_receive(&skb)) 5374 goto another_round; 5375 else if (unlikely(!skb)) 5376 goto out; 5377 } 5378 5379 rx_handler = rcu_dereference(skb->dev->rx_handler); 5380 if (rx_handler) { 5381 if (pt_prev) { 5382 ret = deliver_skb(skb, pt_prev, orig_dev); 5383 pt_prev = NULL; 5384 } 5385 switch (rx_handler(&skb)) { 5386 case RX_HANDLER_CONSUMED: 5387 ret = NET_RX_SUCCESS; 5388 goto out; 5389 case RX_HANDLER_ANOTHER: 5390 goto another_round; 5391 case RX_HANDLER_EXACT: 5392 deliver_exact = true; 5393 break; 5394 case RX_HANDLER_PASS: 5395 break; 5396 default: 5397 BUG(); 5398 } 5399 } 5400 5401 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5402 check_vlan_id: 5403 if (skb_vlan_tag_get_id(skb)) { 5404 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5405 * find vlan device. 5406 */ 5407 skb->pkt_type = PACKET_OTHERHOST; 5408 } else if (eth_type_vlan(skb->protocol)) { 5409 /* Outer header is 802.1P with vlan 0, inner header is 5410 * 802.1Q or 802.1AD and vlan_do_receive() above could 5411 * not find vlan dev for vlan id 0. 5412 */ 5413 __vlan_hwaccel_clear_tag(skb); 5414 skb = skb_vlan_untag(skb); 5415 if (unlikely(!skb)) 5416 goto out; 5417 if (vlan_do_receive(&skb)) 5418 /* After stripping off 802.1P header with vlan 0 5419 * vlan dev is found for inner header. 5420 */ 5421 goto another_round; 5422 else if (unlikely(!skb)) 5423 goto out; 5424 else 5425 /* We have stripped outer 802.1P vlan 0 header. 5426 * But could not find vlan dev. 5427 * check again for vlan id to set OTHERHOST. 
5428 */ 5429 goto check_vlan_id; 5430 } 5431 /* Note: we might in the future use prio bits 5432 * and set skb->priority like in vlan_do_receive() 5433 * For the time being, just ignore Priority Code Point 5434 */ 5435 __vlan_hwaccel_clear_tag(skb); 5436 } 5437 5438 type = skb->protocol; 5439 5440 /* deliver only exact match when indicated */ 5441 if (likely(!deliver_exact)) { 5442 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5443 &ptype_base[ntohs(type) & 5444 PTYPE_HASH_MASK]); 5445 } 5446 5447 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5448 &orig_dev->ptype_specific); 5449 5450 if (unlikely(skb->dev != orig_dev)) { 5451 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5452 &skb->dev->ptype_specific); 5453 } 5454 5455 if (pt_prev) { 5456 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5457 goto drop; 5458 *ppt_prev = pt_prev; 5459 } else { 5460 drop: 5461 if (!deliver_exact) 5462 dev_core_stats_rx_dropped_inc(skb->dev); 5463 else 5464 dev_core_stats_rx_nohandler_inc(skb->dev); 5465 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); 5466 /* Jamal, now you will not able to escape explaining 5467 * me how you were going to use this. :-) 5468 */ 5469 ret = NET_RX_DROP; 5470 } 5471 5472 out: 5473 /* The invariant here is that if *ppt_prev is not NULL 5474 * then skb should also be non-NULL. 5475 * 5476 * Apparently *ppt_prev assignment above holds this invariant due to 5477 * skb dereferencing near it. 5478 */ 5479 *pskb = skb; 5480 return ret; 5481 } 5482 5483 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5484 { 5485 struct net_device *orig_dev = skb->dev; 5486 struct packet_type *pt_prev = NULL; 5487 int ret; 5488 5489 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5490 if (pt_prev) 5491 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5492 skb->dev, pt_prev, orig_dev); 5493 return ret; 5494 } 5495 5496 /** 5497 * netif_receive_skb_core - special purpose version of netif_receive_skb 5498 * @skb: buffer to process 5499 * 5500 * More direct receive version of netif_receive_skb(). It should 5501 * only be used by callers that have a need to skip RPS and Generic XDP. 5502 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5503 * 5504 * This function may only be called from softirq context and interrupts 5505 * should be enabled. 5506 * 5507 * Return values (usually ignored): 5508 * NET_RX_SUCCESS: no congestion 5509 * NET_RX_DROP: packet was dropped 5510 */ 5511 int netif_receive_skb_core(struct sk_buff *skb) 5512 { 5513 int ret; 5514 5515 rcu_read_lock(); 5516 ret = __netif_receive_skb_one_core(skb, false); 5517 rcu_read_unlock(); 5518 5519 return ret; 5520 } 5521 EXPORT_SYMBOL(netif_receive_skb_core); 5522 5523 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5524 struct packet_type *pt_prev, 5525 struct net_device *orig_dev) 5526 { 5527 struct sk_buff *skb, *next; 5528 5529 if (!pt_prev) 5530 return; 5531 if (list_empty(head)) 5532 return; 5533 if (pt_prev->list_func != NULL) 5534 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5535 ip_list_rcv, head, pt_prev, orig_dev); 5536 else 5537 list_for_each_entry_safe(skb, next, head, list) { 5538 skb_list_del_init(skb); 5539 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5540 } 5541 } 5542 5543 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5544 { 5545 /* Fast-path assumptions: 5546 * - There is no RX handler. 5547 * - Only one packet_type matches. 
5548 * If either of these fails, we will end up doing some per-packet 5549 * processing in-line, then handling the 'last ptype' for the whole 5550 * sublist. This can't cause out-of-order delivery to any single ptype, 5551 * because the 'last ptype' must be constant across the sublist, and all 5552 * other ptypes are handled per-packet. 5553 */ 5554 /* Current (common) ptype of sublist */ 5555 struct packet_type *pt_curr = NULL; 5556 /* Current (common) orig_dev of sublist */ 5557 struct net_device *od_curr = NULL; 5558 struct list_head sublist; 5559 struct sk_buff *skb, *next; 5560 5561 INIT_LIST_HEAD(&sublist); 5562 list_for_each_entry_safe(skb, next, head, list) { 5563 struct net_device *orig_dev = skb->dev; 5564 struct packet_type *pt_prev = NULL; 5565 5566 skb_list_del_init(skb); 5567 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5568 if (!pt_prev) 5569 continue; 5570 if (pt_curr != pt_prev || od_curr != orig_dev) { 5571 /* dispatch old sublist */ 5572 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5573 /* start new sublist */ 5574 INIT_LIST_HEAD(&sublist); 5575 pt_curr = pt_prev; 5576 od_curr = orig_dev; 5577 } 5578 list_add_tail(&skb->list, &sublist); 5579 } 5580 5581 /* dispatch final sublist */ 5582 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5583 } 5584 5585 static int __netif_receive_skb(struct sk_buff *skb) 5586 { 5587 int ret; 5588 5589 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5590 unsigned int noreclaim_flag; 5591 5592 /* 5593 * PFMEMALLOC skbs are special, they should 5594 * - be delivered to SOCK_MEMALLOC sockets only 5595 * - stay away from userspace 5596 * - have bounded memory usage 5597 * 5598 * Use PF_MEMALLOC as this saves us from propagating the allocation 5599 * context down to all allocation sites. 5600 */ 5601 noreclaim_flag = memalloc_noreclaim_save(); 5602 ret = __netif_receive_skb_one_core(skb, true); 5603 memalloc_noreclaim_restore(noreclaim_flag); 5604 } else 5605 ret = __netif_receive_skb_one_core(skb, false); 5606 5607 return ret; 5608 } 5609 5610 static void __netif_receive_skb_list(struct list_head *head) 5611 { 5612 unsigned long noreclaim_flag = 0; 5613 struct sk_buff *skb, *next; 5614 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5615 5616 list_for_each_entry_safe(skb, next, head, list) { 5617 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5618 struct list_head sublist; 5619 5620 /* Handle the previous sublist */ 5621 list_cut_before(&sublist, head, &skb->list); 5622 if (!list_empty(&sublist)) 5623 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5624 pfmemalloc = !pfmemalloc; 5625 /* See comments in __netif_receive_skb */ 5626 if (pfmemalloc) 5627 noreclaim_flag = memalloc_noreclaim_save(); 5628 else 5629 memalloc_noreclaim_restore(noreclaim_flag); 5630 } 5631 } 5632 /* Handle the remaining sublist */ 5633 if (!list_empty(head)) 5634 __netif_receive_skb_list_core(head, pfmemalloc); 5635 /* Restore pflags */ 5636 if (pfmemalloc) 5637 memalloc_noreclaim_restore(noreclaim_flag); 5638 } 5639 5640 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5641 { 5642 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5643 struct bpf_prog *new = xdp->prog; 5644 int ret = 0; 5645 5646 switch (xdp->command) { 5647 case XDP_SETUP_PROG: 5648 rcu_assign_pointer(dev->xdp_prog, new); 5649 if (old) 5650 bpf_prog_put(old); 5651 5652 if (old && !new) { 5653 static_branch_dec(&generic_xdp_needed_key); 5654 } else if (new && !old) { 5655 static_branch_inc(&generic_xdp_needed_key); 5656 dev_disable_lro(dev); 5657 dev_disable_gro_hw(dev); 5658 } 5659 break; 5660 5661 default: 5662 ret = -EINVAL; 5663 break; 5664 } 5665 5666 return ret; 5667 } 5668 5669 static int netif_receive_skb_internal(struct sk_buff *skb) 5670 { 5671 int ret; 5672 5673 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5674 5675 if (skb_defer_rx_timestamp(skb)) 5676 return NET_RX_SUCCESS; 5677 5678 rcu_read_lock(); 5679 #ifdef CONFIG_RPS 5680 if (static_branch_unlikely(&rps_needed)) { 5681 struct rps_dev_flow voidflow, *rflow = &voidflow; 5682 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5683 5684 if (cpu >= 0) { 5685 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5686 rcu_read_unlock(); 5687 return ret; 5688 } 5689 } 5690 #endif 5691 ret = __netif_receive_skb(skb); 5692 rcu_read_unlock(); 5693 return ret; 5694 } 5695 5696 void netif_receive_skb_list_internal(struct list_head *head) 5697 { 5698 struct sk_buff *skb, *next; 5699 struct list_head sublist; 5700 5701 INIT_LIST_HEAD(&sublist); 5702 list_for_each_entry_safe(skb, next, head, list) { 5703 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5704 skb_list_del_init(skb); 5705 if (!skb_defer_rx_timestamp(skb)) 5706 list_add_tail(&skb->list, &sublist); 5707 } 5708 list_splice_init(&sublist, head); 5709 5710 rcu_read_lock(); 5711 #ifdef CONFIG_RPS 5712 if (static_branch_unlikely(&rps_needed)) { 5713 list_for_each_entry_safe(skb, next, head, list) { 5714 struct rps_dev_flow voidflow, *rflow = &voidflow; 5715 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5716 5717 if (cpu >= 0) { 5718 /* Will be handled, remove from list */ 5719 skb_list_del_init(skb); 5720 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5721 } 5722 } 5723 } 5724 #endif 5725 __netif_receive_skb_list(head); 5726 rcu_read_unlock(); 5727 } 5728 5729 /** 5730 * netif_receive_skb - process receive buffer from network 5731 * @skb: buffer to process 5732 * 5733 * netif_receive_skb() is the main receive data processing function. 5734 * It always succeeds. The buffer may be dropped during processing 5735 * for congestion control or by the protocol layers. 5736 * 5737 * This function may only be called from softirq context and interrupts 5738 * should be enabled. 
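 * Callers running in hard interrupt or process context should use
 * netif_rx() instead.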
5739 * 5740 * Return values (usually ignored): 5741 * NET_RX_SUCCESS: no congestion 5742 * NET_RX_DROP: packet was dropped 5743 */ 5744 int netif_receive_skb(struct sk_buff *skb) 5745 { 5746 int ret; 5747 5748 trace_netif_receive_skb_entry(skb); 5749 5750 ret = netif_receive_skb_internal(skb); 5751 trace_netif_receive_skb_exit(ret); 5752 5753 return ret; 5754 } 5755 EXPORT_SYMBOL(netif_receive_skb); 5756 5757 /** 5758 * netif_receive_skb_list - process many receive buffers from network 5759 * @head: list of skbs to process. 5760 * 5761 * Since return value of netif_receive_skb() is normally ignored, and 5762 * wouldn't be meaningful for a list, this function returns void. 5763 * 5764 * This function may only be called from softirq context and interrupts 5765 * should be enabled. 5766 */ 5767 void netif_receive_skb_list(struct list_head *head) 5768 { 5769 struct sk_buff *skb; 5770 5771 if (list_empty(head)) 5772 return; 5773 if (trace_netif_receive_skb_list_entry_enabled()) { 5774 list_for_each_entry(skb, head, list) 5775 trace_netif_receive_skb_list_entry(skb); 5776 } 5777 netif_receive_skb_list_internal(head); 5778 trace_netif_receive_skb_list_exit(0); 5779 } 5780 EXPORT_SYMBOL(netif_receive_skb_list); 5781 5782 static DEFINE_PER_CPU(struct work_struct, flush_works); 5783 5784 /* Network device is going away, flush any packets still pending */ 5785 static void flush_backlog(struct work_struct *work) 5786 { 5787 struct sk_buff *skb, *tmp; 5788 struct softnet_data *sd; 5789 5790 local_bh_disable(); 5791 sd = this_cpu_ptr(&softnet_data); 5792 5793 rps_lock_irq_disable(sd); 5794 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5795 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5796 __skb_unlink(skb, &sd->input_pkt_queue); 5797 dev_kfree_skb_irq(skb); 5798 input_queue_head_incr(sd); 5799 } 5800 } 5801 rps_unlock_irq_enable(sd); 5802 5803 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5804 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5805 __skb_unlink(skb, &sd->process_queue); 5806 kfree_skb(skb); 5807 input_queue_head_incr(sd); 5808 } 5809 } 5810 local_bh_enable(); 5811 } 5812 5813 static bool flush_required(int cpu) 5814 { 5815 #if IS_ENABLED(CONFIG_RPS) 5816 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5817 bool do_flush; 5818 5819 rps_lock_irq_disable(sd); 5820 5821 /* as insertion into process_queue happens with the rps lock held, 5822 * process_queue access may race only with dequeue 5823 */ 5824 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5825 !skb_queue_empty_lockless(&sd->process_queue); 5826 rps_unlock_irq_enable(sd); 5827 5828 return do_flush; 5829 #endif 5830 /* without RPS we can't safely check input_pkt_queue: during a 5831 * concurrent remote skb_queue_splice() we can detect as empty both 5832 * input_pkt_queue and process_queue even if the latter could end-up 5833 * containing a lot of packets. 
5834 */ 5835 return true; 5836 } 5837 5838 static void flush_all_backlogs(void) 5839 { 5840 static cpumask_t flush_cpus; 5841 unsigned int cpu; 5842 5843 /* since we are under rtnl lock protection we can use static data 5844 * for the cpumask and avoid allocating on stack the possibly 5845 * large mask 5846 */ 5847 ASSERT_RTNL(); 5848 5849 cpus_read_lock(); 5850 5851 cpumask_clear(&flush_cpus); 5852 for_each_online_cpu(cpu) { 5853 if (flush_required(cpu)) { 5854 queue_work_on(cpu, system_highpri_wq, 5855 per_cpu_ptr(&flush_works, cpu)); 5856 cpumask_set_cpu(cpu, &flush_cpus); 5857 } 5858 } 5859 5860 /* we can have in flight packet[s] on the cpus we are not flushing, 5861 * synchronize_net() in unregister_netdevice_many() will take care of 5862 * them 5863 */ 5864 for_each_cpu(cpu, &flush_cpus) 5865 flush_work(per_cpu_ptr(&flush_works, cpu)); 5866 5867 cpus_read_unlock(); 5868 } 5869 5870 static void net_rps_send_ipi(struct softnet_data *remsd) 5871 { 5872 #ifdef CONFIG_RPS 5873 while (remsd) { 5874 struct softnet_data *next = remsd->rps_ipi_next; 5875 5876 if (cpu_online(remsd->cpu)) 5877 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5878 remsd = next; 5879 } 5880 #endif 5881 } 5882 5883 /* 5884 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5885 * Note: called with local irq disabled, but exits with local irq enabled. 5886 */ 5887 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5888 { 5889 #ifdef CONFIG_RPS 5890 struct softnet_data *remsd = sd->rps_ipi_list; 5891 5892 if (remsd) { 5893 sd->rps_ipi_list = NULL; 5894 5895 local_irq_enable(); 5896 5897 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5898 net_rps_send_ipi(remsd); 5899 } else 5900 #endif 5901 local_irq_enable(); 5902 } 5903 5904 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5905 { 5906 #ifdef CONFIG_RPS 5907 return sd->rps_ipi_list != NULL; 5908 #else 5909 return false; 5910 #endif 5911 } 5912 5913 static int process_backlog(struct napi_struct *napi, int quota) 5914 { 5915 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5916 bool again = true; 5917 int work = 0; 5918 5919 /* Check if we have pending ipi, its better to send them now, 5920 * not waiting net_rx_action() end. 5921 */ 5922 if (sd_has_rps_ipi_waiting(sd)) { 5923 local_irq_disable(); 5924 net_rps_action_and_irq_enable(sd); 5925 } 5926 5927 napi->weight = READ_ONCE(dev_rx_weight); 5928 while (again) { 5929 struct sk_buff *skb; 5930 5931 while ((skb = __skb_dequeue(&sd->process_queue))) { 5932 rcu_read_lock(); 5933 __netif_receive_skb(skb); 5934 rcu_read_unlock(); 5935 input_queue_head_incr(sd); 5936 if (++work >= quota) 5937 return work; 5938 5939 } 5940 5941 rps_lock_irq_disable(sd); 5942 if (skb_queue_empty(&sd->input_pkt_queue)) { 5943 /* 5944 * Inline a custom version of __napi_complete(). 5945 * only current cpu owns and manipulates this napi, 5946 * and NAPI_STATE_SCHED is the only possible flag set 5947 * on backlog. 5948 * We can use a plain write instead of clear_bit(), 5949 * and we dont need an smp_mb() memory barrier. 5950 */ 5951 napi->state = 0; 5952 again = false; 5953 } else { 5954 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5955 &sd->process_queue); 5956 } 5957 rps_unlock_irq_enable(sd); 5958 } 5959 5960 return work; 5961 } 5962 5963 /** 5964 * __napi_schedule - schedule for receive 5965 * @n: entry to schedule 5966 * 5967 * The entry's receive function will be scheduled to run. 
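 *
 * A common pattern in a driver interrupt handler is (sketch; the
 * mydev_* and priv names are illustrative):
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		mydev_disable_rx_irqs(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 *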
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set.
		 * This was suggested by Alexander Duyck, as the compiler
		 * emits better code than:
		 *	if (val & NAPIF_STATE_SCHED)
		 *		new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new, timeout = 0;
	bool ret = true;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case it's running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
6041 */ 6042 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6043 NAPIF_STATE_IN_BUSY_POLL))) 6044 return false; 6045 6046 if (work_done) { 6047 if (n->gro_bitmask) 6048 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6049 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 6050 } 6051 if (n->defer_hard_irqs_count > 0) { 6052 n->defer_hard_irqs_count--; 6053 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6054 if (timeout) 6055 ret = false; 6056 } 6057 if (n->gro_bitmask) { 6058 /* When the NAPI instance uses a timeout and keeps postponing 6059 * it, we need to bound somehow the time packets are kept in 6060 * the GRO layer 6061 */ 6062 napi_gro_flush(n, !!timeout); 6063 } 6064 6065 gro_normal_list(n); 6066 6067 if (unlikely(!list_empty(&n->poll_list))) { 6068 /* If n->poll_list is not empty, we need to mask irqs */ 6069 local_irq_save(flags); 6070 list_del_init(&n->poll_list); 6071 local_irq_restore(flags); 6072 } 6073 6074 val = READ_ONCE(n->state); 6075 do { 6076 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6077 6078 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6079 NAPIF_STATE_SCHED_THREADED | 6080 NAPIF_STATE_PREFER_BUSY_POLL); 6081 6082 /* If STATE_MISSED was set, leave STATE_SCHED set, 6083 * because we will call napi->poll() one more time. 6084 * This C code was suggested by Alexander Duyck to help gcc. 6085 */ 6086 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6087 NAPIF_STATE_SCHED; 6088 } while (!try_cmpxchg(&n->state, &val, new)); 6089 6090 if (unlikely(val & NAPIF_STATE_MISSED)) { 6091 __napi_schedule(n); 6092 return false; 6093 } 6094 6095 if (timeout) 6096 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6097 HRTIMER_MODE_REL_PINNED); 6098 return ret; 6099 } 6100 EXPORT_SYMBOL(napi_complete_done); 6101 6102 /* must be called under rcu_read_lock(), as we dont take a reference */ 6103 static struct napi_struct *napi_by_id(unsigned int napi_id) 6104 { 6105 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6106 struct napi_struct *napi; 6107 6108 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6109 if (napi->napi_id == napi_id) 6110 return napi; 6111 6112 return NULL; 6113 } 6114 6115 #if defined(CONFIG_NET_RX_BUSY_POLL) 6116 6117 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6118 { 6119 if (!skip_schedule) { 6120 gro_normal_list(napi); 6121 __napi_schedule(napi); 6122 return; 6123 } 6124 6125 if (napi->gro_bitmask) { 6126 /* flush too old packets 6127 * If HZ < 1000, flush all packets. 6128 */ 6129 napi_gro_flush(napi, HZ >= 1000); 6130 } 6131 6132 gro_normal_list(napi); 6133 clear_bit(NAPI_STATE_SCHED, &napi->state); 6134 } 6135 6136 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6137 u16 budget) 6138 { 6139 bool skip_schedule = false; 6140 unsigned long timeout; 6141 int rc; 6142 6143 /* Busy polling means there is a high chance device driver hard irq 6144 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6145 * set in napi_schedule_prep(). 6146 * Since we are about to call napi->poll() once more, we can safely 6147 * clear NAPI_STATE_MISSED. 6148 * 6149 * Note: x86 could use a single "lock and ..." 
instruction 6150 * to perform these two clear_bit() 6151 */ 6152 clear_bit(NAPI_STATE_MISSED, &napi->state); 6153 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6154 6155 local_bh_disable(); 6156 6157 if (prefer_busy_poll) { 6158 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6159 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6160 if (napi->defer_hard_irqs_count && timeout) { 6161 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6162 skip_schedule = true; 6163 } 6164 } 6165 6166 /* All we really want here is to re-enable device interrupts. 6167 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6168 */ 6169 rc = napi->poll(napi, budget); 6170 /* We can't gro_normal_list() here, because napi->poll() might have 6171 * rearmed the napi (napi_complete_done()) in which case it could 6172 * already be running on another CPU. 6173 */ 6174 trace_napi_poll(napi, rc, budget); 6175 netpoll_poll_unlock(have_poll_lock); 6176 if (rc == budget) 6177 __busy_poll_stop(napi, skip_schedule); 6178 local_bh_enable(); 6179 } 6180 6181 void napi_busy_loop(unsigned int napi_id, 6182 bool (*loop_end)(void *, unsigned long), 6183 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6184 { 6185 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6186 int (*napi_poll)(struct napi_struct *napi, int budget); 6187 void *have_poll_lock = NULL; 6188 struct napi_struct *napi; 6189 6190 restart: 6191 napi_poll = NULL; 6192 6193 rcu_read_lock(); 6194 6195 napi = napi_by_id(napi_id); 6196 if (!napi) 6197 goto out; 6198 6199 preempt_disable(); 6200 for (;;) { 6201 int work = 0; 6202 6203 local_bh_disable(); 6204 if (!napi_poll) { 6205 unsigned long val = READ_ONCE(napi->state); 6206 6207 /* If multiple threads are competing for this napi, 6208 * we avoid dirtying napi->state as much as we can. 
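 * The cmpxchg() that takes ownership is only attempted once none of
 * the DISABLE, SCHED or IN_BUSY_POLL flags is seen in the state word.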
6209 */ 6210 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6211 NAPIF_STATE_IN_BUSY_POLL)) { 6212 if (prefer_busy_poll) 6213 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6214 goto count; 6215 } 6216 if (cmpxchg(&napi->state, val, 6217 val | NAPIF_STATE_IN_BUSY_POLL | 6218 NAPIF_STATE_SCHED) != val) { 6219 if (prefer_busy_poll) 6220 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6221 goto count; 6222 } 6223 have_poll_lock = netpoll_poll_lock(napi); 6224 napi_poll = napi->poll; 6225 } 6226 work = napi_poll(napi, budget); 6227 trace_napi_poll(napi, work, budget); 6228 gro_normal_list(napi); 6229 count: 6230 if (work > 0) 6231 __NET_ADD_STATS(dev_net(napi->dev), 6232 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6233 local_bh_enable(); 6234 6235 if (!loop_end || loop_end(loop_end_arg, start_time)) 6236 break; 6237 6238 if (unlikely(need_resched())) { 6239 if (napi_poll) 6240 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6241 preempt_enable(); 6242 rcu_read_unlock(); 6243 cond_resched(); 6244 if (loop_end(loop_end_arg, start_time)) 6245 return; 6246 goto restart; 6247 } 6248 cpu_relax(); 6249 } 6250 if (napi_poll) 6251 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6252 preempt_enable(); 6253 out: 6254 rcu_read_unlock(); 6255 } 6256 EXPORT_SYMBOL(napi_busy_loop); 6257 6258 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6259 6260 static void napi_hash_add(struct napi_struct *napi) 6261 { 6262 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6263 return; 6264 6265 spin_lock(&napi_hash_lock); 6266 6267 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6268 do { 6269 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6270 napi_gen_id = MIN_NAPI_ID; 6271 } while (napi_by_id(napi_gen_id)); 6272 napi->napi_id = napi_gen_id; 6273 6274 hlist_add_head_rcu(&napi->napi_hash_node, 6275 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6276 6277 spin_unlock(&napi_hash_lock); 6278 } 6279 6280 /* Warning : caller is responsible to make sure rcu grace period 6281 * is respected before freeing memory containing @napi 6282 */ 6283 static void napi_hash_del(struct napi_struct *napi) 6284 { 6285 spin_lock(&napi_hash_lock); 6286 6287 hlist_del_init_rcu(&napi->napi_hash_node); 6288 6289 spin_unlock(&napi_hash_lock); 6290 } 6291 6292 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6293 { 6294 struct napi_struct *napi; 6295 6296 napi = container_of(timer, struct napi_struct, timer); 6297 6298 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6299 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
6300 */ 6301 if (!napi_disable_pending(napi) && 6302 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6303 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6304 __napi_schedule_irqoff(napi); 6305 } 6306 6307 return HRTIMER_NORESTART; 6308 } 6309 6310 static void init_gro_hash(struct napi_struct *napi) 6311 { 6312 int i; 6313 6314 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6315 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6316 napi->gro_hash[i].count = 0; 6317 } 6318 napi->gro_bitmask = 0; 6319 } 6320 6321 int dev_set_threaded(struct net_device *dev, bool threaded) 6322 { 6323 struct napi_struct *napi; 6324 int err = 0; 6325 6326 if (dev->threaded == threaded) 6327 return 0; 6328 6329 if (threaded) { 6330 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6331 if (!napi->thread) { 6332 err = napi_kthread_create(napi); 6333 if (err) { 6334 threaded = false; 6335 break; 6336 } 6337 } 6338 } 6339 } 6340 6341 dev->threaded = threaded; 6342 6343 /* Make sure kthread is created before THREADED bit 6344 * is set. 6345 */ 6346 smp_mb__before_atomic(); 6347 6348 /* Setting/unsetting threaded mode on a napi might not immediately 6349 * take effect, if the current napi instance is actively being 6350 * polled. In this case, the switch between threaded mode and 6351 * softirq mode will happen in the next round of napi_schedule(). 6352 * This should not cause hiccups/stalls to the live traffic. 6353 */ 6354 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6355 if (threaded) 6356 set_bit(NAPI_STATE_THREADED, &napi->state); 6357 else 6358 clear_bit(NAPI_STATE_THREADED, &napi->state); 6359 } 6360 6361 return err; 6362 } 6363 EXPORT_SYMBOL(dev_set_threaded); 6364 6365 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 6366 int (*poll)(struct napi_struct *, int), int weight) 6367 { 6368 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6369 return; 6370 6371 INIT_LIST_HEAD(&napi->poll_list); 6372 INIT_HLIST_NODE(&napi->napi_hash_node); 6373 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6374 napi->timer.function = napi_watchdog; 6375 init_gro_hash(napi); 6376 napi->skb = NULL; 6377 INIT_LIST_HEAD(&napi->rx_list); 6378 napi->rx_count = 0; 6379 napi->poll = poll; 6380 if (weight > NAPI_POLL_WEIGHT) 6381 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6382 weight); 6383 napi->weight = weight; 6384 napi->dev = dev; 6385 #ifdef CONFIG_NETPOLL 6386 napi->poll_owner = -1; 6387 #endif 6388 set_bit(NAPI_STATE_SCHED, &napi->state); 6389 set_bit(NAPI_STATE_NPSVC, &napi->state); 6390 list_add_rcu(&napi->dev_list, &dev->napi_list); 6391 napi_hash_add(napi); 6392 napi_get_frags_check(napi); 6393 /* Create kthread for this napi if dev->threaded is set. 6394 * Clear dev->threaded if kthread creation failed so that 6395 * threaded mode will not be enabled in napi_enable(). 
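 * (napi_enable() only sets NAPIF_STATE_THREADED when both dev->threaded
 * and napi->thread are set.)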
6396 */ 6397 if (dev->threaded && napi_kthread_create(napi)) 6398 dev->threaded = 0; 6399 } 6400 EXPORT_SYMBOL(netif_napi_add_weight); 6401 6402 void napi_disable(struct napi_struct *n) 6403 { 6404 unsigned long val, new; 6405 6406 might_sleep(); 6407 set_bit(NAPI_STATE_DISABLE, &n->state); 6408 6409 val = READ_ONCE(n->state); 6410 do { 6411 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 6412 usleep_range(20, 200); 6413 val = READ_ONCE(n->state); 6414 } 6415 6416 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 6417 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 6418 } while (!try_cmpxchg(&n->state, &val, new)); 6419 6420 hrtimer_cancel(&n->timer); 6421 6422 clear_bit(NAPI_STATE_DISABLE, &n->state); 6423 } 6424 EXPORT_SYMBOL(napi_disable); 6425 6426 /** 6427 * napi_enable - enable NAPI scheduling 6428 * @n: NAPI context 6429 * 6430 * Resume NAPI from being scheduled on this context. 6431 * Must be paired with napi_disable. 6432 */ 6433 void napi_enable(struct napi_struct *n) 6434 { 6435 unsigned long new, val = READ_ONCE(n->state); 6436 6437 do { 6438 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6439 6440 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6441 if (n->dev->threaded && n->thread) 6442 new |= NAPIF_STATE_THREADED; 6443 } while (!try_cmpxchg(&n->state, &val, new)); 6444 } 6445 EXPORT_SYMBOL(napi_enable); 6446 6447 static void flush_gro_hash(struct napi_struct *napi) 6448 { 6449 int i; 6450 6451 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6452 struct sk_buff *skb, *n; 6453 6454 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6455 kfree_skb(skb); 6456 napi->gro_hash[i].count = 0; 6457 } 6458 } 6459 6460 /* Must be called in process context */ 6461 void __netif_napi_del(struct napi_struct *napi) 6462 { 6463 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6464 return; 6465 6466 napi_hash_del(napi); 6467 list_del_rcu(&napi->dev_list); 6468 napi_free_frags(napi); 6469 6470 flush_gro_hash(napi); 6471 napi->gro_bitmask = 0; 6472 6473 if (napi->thread) { 6474 kthread_stop(napi->thread); 6475 napi->thread = NULL; 6476 } 6477 } 6478 EXPORT_SYMBOL(__netif_napi_del); 6479 6480 static int __napi_poll(struct napi_struct *n, bool *repoll) 6481 { 6482 int work, weight; 6483 6484 weight = n->weight; 6485 6486 /* This NAPI_STATE_SCHED test is for avoiding a race 6487 * with netpoll's poll_napi(). Only the entity which 6488 * obtains the lock and sees NAPI_STATE_SCHED set will 6489 * actually make the ->poll() call. Therefore we avoid 6490 * accidentally calling ->poll() when NAPI is not scheduled. 6491 */ 6492 work = 0; 6493 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6494 work = n->poll(n, weight); 6495 trace_napi_poll(n, work, weight); 6496 } 6497 6498 if (unlikely(work > weight)) 6499 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6500 n->poll, work, weight); 6501 6502 if (likely(work < weight)) 6503 return work; 6504 6505 /* Drivers must not modify the NAPI state if they 6506 * consume the entire weight. In such cases this code 6507 * still "owns" the NAPI instance and therefore can 6508 * move the instance around on the list at-will. 6509 */ 6510 if (unlikely(napi_disable_pending(n))) { 6511 napi_complete(n); 6512 return work; 6513 } 6514 6515 /* The NAPI context has more processing work, but busy-polling 6516 * is preferred. Exit early. 
6517 */ 6518 if (napi_prefer_busy_poll(n)) { 6519 if (napi_complete_done(n, work)) { 6520 /* If timeout is not set, we need to make sure 6521 * that the NAPI is re-scheduled. 6522 */ 6523 napi_schedule(n); 6524 } 6525 return work; 6526 } 6527 6528 if (n->gro_bitmask) { 6529 /* flush too old packets 6530 * If HZ < 1000, flush all packets. 6531 */ 6532 napi_gro_flush(n, HZ >= 1000); 6533 } 6534 6535 gro_normal_list(n); 6536 6537 /* Some drivers may have called napi_schedule 6538 * prior to exhausting their budget. 6539 */ 6540 if (unlikely(!list_empty(&n->poll_list))) { 6541 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6542 n->dev ? n->dev->name : "backlog"); 6543 return work; 6544 } 6545 6546 *repoll = true; 6547 6548 return work; 6549 } 6550 6551 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6552 { 6553 bool do_repoll = false; 6554 void *have; 6555 int work; 6556 6557 list_del_init(&n->poll_list); 6558 6559 have = netpoll_poll_lock(n); 6560 6561 work = __napi_poll(n, &do_repoll); 6562 6563 if (do_repoll) 6564 list_add_tail(&n->poll_list, repoll); 6565 6566 netpoll_poll_unlock(have); 6567 6568 return work; 6569 } 6570 6571 static int napi_thread_wait(struct napi_struct *napi) 6572 { 6573 bool woken = false; 6574 6575 set_current_state(TASK_INTERRUPTIBLE); 6576 6577 while (!kthread_should_stop()) { 6578 /* Testing SCHED_THREADED bit here to make sure the current 6579 * kthread owns this napi and could poll on this napi. 6580 * Testing SCHED bit is not enough because SCHED bit might be 6581 * set by some other busy poll thread or by napi_disable(). 6582 */ 6583 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 6584 WARN_ON(!list_empty(&napi->poll_list)); 6585 __set_current_state(TASK_RUNNING); 6586 return 0; 6587 } 6588 6589 schedule(); 6590 /* woken being true indicates this thread owns this napi. 
*/ 6591 woken = true; 6592 set_current_state(TASK_INTERRUPTIBLE); 6593 } 6594 __set_current_state(TASK_RUNNING); 6595 6596 return -1; 6597 } 6598 6599 static int napi_threaded_poll(void *data) 6600 { 6601 struct napi_struct *napi = data; 6602 void *have; 6603 6604 while (!napi_thread_wait(napi)) { 6605 for (;;) { 6606 bool repoll = false; 6607 6608 local_bh_disable(); 6609 6610 have = netpoll_poll_lock(napi); 6611 __napi_poll(napi, &repoll); 6612 netpoll_poll_unlock(have); 6613 6614 local_bh_enable(); 6615 6616 if (!repoll) 6617 break; 6618 6619 cond_resched(); 6620 } 6621 } 6622 return 0; 6623 } 6624 6625 static void skb_defer_free_flush(struct softnet_data *sd) 6626 { 6627 struct sk_buff *skb, *next; 6628 6629 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ 6630 if (!READ_ONCE(sd->defer_list)) 6631 return; 6632 6633 spin_lock_irq(&sd->defer_lock); 6634 skb = sd->defer_list; 6635 sd->defer_list = NULL; 6636 sd->defer_count = 0; 6637 spin_unlock_irq(&sd->defer_lock); 6638 6639 while (skb != NULL) { 6640 next = skb->next; 6641 napi_consume_skb(skb, 1); 6642 skb = next; 6643 } 6644 } 6645 6646 static __latent_entropy void net_rx_action(struct softirq_action *h) 6647 { 6648 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6649 unsigned long time_limit = jiffies + 6650 usecs_to_jiffies(READ_ONCE(netdev_budget_usecs)); 6651 int budget = READ_ONCE(netdev_budget); 6652 LIST_HEAD(list); 6653 LIST_HEAD(repoll); 6654 6655 start: 6656 sd->in_net_rx_action = true; 6657 local_irq_disable(); 6658 list_splice_init(&sd->poll_list, &list); 6659 local_irq_enable(); 6660 6661 for (;;) { 6662 struct napi_struct *n; 6663 6664 skb_defer_free_flush(sd); 6665 6666 if (list_empty(&list)) { 6667 if (list_empty(&repoll)) { 6668 sd->in_net_rx_action = false; 6669 barrier(); 6670 /* We need to check if ____napi_schedule() 6671 * had refilled poll_list while 6672 * sd->in_net_rx_action was true. 6673 */ 6674 if (!list_empty(&sd->poll_list)) 6675 goto start; 6676 if (!sd_has_rps_ipi_waiting(sd)) 6677 goto end; 6678 } 6679 break; 6680 } 6681 6682 n = list_first_entry(&list, struct napi_struct, poll_list); 6683 budget -= napi_poll(n, &repoll); 6684 6685 /* If softirq window is exhausted then punt. 6686 * Allow this to run for 2 jiffies since which will allow 6687 * an average latency of 1.5/HZ. 
6688 */ 6689 if (unlikely(budget <= 0 || 6690 time_after_eq(jiffies, time_limit))) { 6691 sd->time_squeeze++; 6692 break; 6693 } 6694 } 6695 6696 local_irq_disable(); 6697 6698 list_splice_tail_init(&sd->poll_list, &list); 6699 list_splice_tail(&repoll, &list); 6700 list_splice(&list, &sd->poll_list); 6701 if (!list_empty(&sd->poll_list)) 6702 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6703 else 6704 sd->in_net_rx_action = false; 6705 6706 net_rps_action_and_irq_enable(sd); 6707 end:; 6708 } 6709 6710 struct netdev_adjacent { 6711 struct net_device *dev; 6712 netdevice_tracker dev_tracker; 6713 6714 /* upper master flag, there can only be one master device per list */ 6715 bool master; 6716 6717 /* lookup ignore flag */ 6718 bool ignore; 6719 6720 /* counter for the number of times this device was added to us */ 6721 u16 ref_nr; 6722 6723 /* private field for the users */ 6724 void *private; 6725 6726 struct list_head list; 6727 struct rcu_head rcu; 6728 }; 6729 6730 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6731 struct list_head *adj_list) 6732 { 6733 struct netdev_adjacent *adj; 6734 6735 list_for_each_entry(adj, adj_list, list) { 6736 if (adj->dev == adj_dev) 6737 return adj; 6738 } 6739 return NULL; 6740 } 6741 6742 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 6743 struct netdev_nested_priv *priv) 6744 { 6745 struct net_device *dev = (struct net_device *)priv->data; 6746 6747 return upper_dev == dev; 6748 } 6749 6750 /** 6751 * netdev_has_upper_dev - Check if device is linked to an upper device 6752 * @dev: device 6753 * @upper_dev: upper device to check 6754 * 6755 * Find out if a device is linked to specified upper device and return true 6756 * in case it is. Note that this checks only immediate upper device, 6757 * not through a complete stack of devices. The caller must hold the RTNL lock. 6758 */ 6759 bool netdev_has_upper_dev(struct net_device *dev, 6760 struct net_device *upper_dev) 6761 { 6762 struct netdev_nested_priv priv = { 6763 .data = (void *)upper_dev, 6764 }; 6765 6766 ASSERT_RTNL(); 6767 6768 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6769 &priv); 6770 } 6771 EXPORT_SYMBOL(netdev_has_upper_dev); 6772 6773 /** 6774 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 6775 * @dev: device 6776 * @upper_dev: upper device to check 6777 * 6778 * Find out if a device is linked to specified upper device and return true 6779 * in case it is. Note that this checks the entire upper device chain. 6780 * The caller must hold rcu lock. 6781 */ 6782 6783 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6784 struct net_device *upper_dev) 6785 { 6786 struct netdev_nested_priv priv = { 6787 .data = (void *)upper_dev, 6788 }; 6789 6790 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6791 &priv); 6792 } 6793 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6794 6795 /** 6796 * netdev_has_any_upper_dev - Check if device is linked to some device 6797 * @dev: device 6798 * 6799 * Find out if a device is linked to an upper device and return true in case 6800 * it is. The caller must hold the RTNL lock. 
6801 */ 6802 bool netdev_has_any_upper_dev(struct net_device *dev) 6803 { 6804 ASSERT_RTNL(); 6805 6806 return !list_empty(&dev->adj_list.upper); 6807 } 6808 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6809 6810 /** 6811 * netdev_master_upper_dev_get - Get master upper device 6812 * @dev: device 6813 * 6814 * Find a master upper device and return pointer to it or NULL in case 6815 * it's not there. The caller must hold the RTNL lock. 6816 */ 6817 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6818 { 6819 struct netdev_adjacent *upper; 6820 6821 ASSERT_RTNL(); 6822 6823 if (list_empty(&dev->adj_list.upper)) 6824 return NULL; 6825 6826 upper = list_first_entry(&dev->adj_list.upper, 6827 struct netdev_adjacent, list); 6828 if (likely(upper->master)) 6829 return upper->dev; 6830 return NULL; 6831 } 6832 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6833 6834 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 6835 { 6836 struct netdev_adjacent *upper; 6837 6838 ASSERT_RTNL(); 6839 6840 if (list_empty(&dev->adj_list.upper)) 6841 return NULL; 6842 6843 upper = list_first_entry(&dev->adj_list.upper, 6844 struct netdev_adjacent, list); 6845 if (likely(upper->master) && !upper->ignore) 6846 return upper->dev; 6847 return NULL; 6848 } 6849 6850 /** 6851 * netdev_has_any_lower_dev - Check if device is linked to some device 6852 * @dev: device 6853 * 6854 * Find out if a device is linked to a lower device and return true in case 6855 * it is. The caller must hold the RTNL lock. 6856 */ 6857 static bool netdev_has_any_lower_dev(struct net_device *dev) 6858 { 6859 ASSERT_RTNL(); 6860 6861 return !list_empty(&dev->adj_list.lower); 6862 } 6863 6864 void *netdev_adjacent_get_private(struct list_head *adj_list) 6865 { 6866 struct netdev_adjacent *adj; 6867 6868 adj = list_entry(adj_list, struct netdev_adjacent, list); 6869 6870 return adj->private; 6871 } 6872 EXPORT_SYMBOL(netdev_adjacent_get_private); 6873 6874 /** 6875 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6876 * @dev: device 6877 * @iter: list_head ** of the current position 6878 * 6879 * Gets the next device from the dev's upper list, starting from iter 6880 * position. The caller must hold RCU read lock. 
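 *
 * A hedged sketch of the usual iteration, normally written via the
 * netdev_for_each_upper_dev_rcu() helper from netdevice.h;
 * do_something() is purely illustrative:
 *
 *	struct list_head *iter;
 *	struct net_device *upper;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		do_something(upper);
 *	rcu_read_unlock();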
6881 */ 6882 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6883 struct list_head **iter) 6884 { 6885 struct netdev_adjacent *upper; 6886 6887 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6888 6889 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6890 6891 if (&upper->list == &dev->adj_list.upper) 6892 return NULL; 6893 6894 *iter = &upper->list; 6895 6896 return upper->dev; 6897 } 6898 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6899 6900 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 6901 struct list_head **iter, 6902 bool *ignore) 6903 { 6904 struct netdev_adjacent *upper; 6905 6906 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 6907 6908 if (&upper->list == &dev->adj_list.upper) 6909 return NULL; 6910 6911 *iter = &upper->list; 6912 *ignore = upper->ignore; 6913 6914 return upper->dev; 6915 } 6916 6917 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6918 struct list_head **iter) 6919 { 6920 struct netdev_adjacent *upper; 6921 6922 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6923 6924 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6925 6926 if (&upper->list == &dev->adj_list.upper) 6927 return NULL; 6928 6929 *iter = &upper->list; 6930 6931 return upper->dev; 6932 } 6933 6934 static int __netdev_walk_all_upper_dev(struct net_device *dev, 6935 int (*fn)(struct net_device *dev, 6936 struct netdev_nested_priv *priv), 6937 struct netdev_nested_priv *priv) 6938 { 6939 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6940 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6941 int ret, cur = 0; 6942 bool ignore; 6943 6944 now = dev; 6945 iter = &dev->adj_list.upper; 6946 6947 while (1) { 6948 if (now != dev) { 6949 ret = fn(now, priv); 6950 if (ret) 6951 return ret; 6952 } 6953 6954 next = NULL; 6955 while (1) { 6956 udev = __netdev_next_upper_dev(now, &iter, &ignore); 6957 if (!udev) 6958 break; 6959 if (ignore) 6960 continue; 6961 6962 next = udev; 6963 niter = &udev->adj_list.upper; 6964 dev_stack[cur] = now; 6965 iter_stack[cur++] = iter; 6966 break; 6967 } 6968 6969 if (!next) { 6970 if (!cur) 6971 return 0; 6972 next = dev_stack[--cur]; 6973 niter = iter_stack[cur]; 6974 } 6975 6976 now = next; 6977 iter = niter; 6978 } 6979 6980 return 0; 6981 } 6982 6983 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6984 int (*fn)(struct net_device *dev, 6985 struct netdev_nested_priv *priv), 6986 struct netdev_nested_priv *priv) 6987 { 6988 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6989 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6990 int ret, cur = 0; 6991 6992 now = dev; 6993 iter = &dev->adj_list.upper; 6994 6995 while (1) { 6996 if (now != dev) { 6997 ret = fn(now, priv); 6998 if (ret) 6999 return ret; 7000 } 7001 7002 next = NULL; 7003 while (1) { 7004 udev = netdev_next_upper_dev_rcu(now, &iter); 7005 if (!udev) 7006 break; 7007 7008 next = udev; 7009 niter = &udev->adj_list.upper; 7010 dev_stack[cur] = now; 7011 iter_stack[cur++] = iter; 7012 break; 7013 } 7014 7015 if (!next) { 7016 if (!cur) 7017 return 0; 7018 next = dev_stack[--cur]; 7019 niter = iter_stack[cur]; 7020 } 7021 7022 now = next; 7023 iter = niter; 7024 } 7025 7026 return 0; 7027 } 7028 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7029 7030 static bool __netdev_has_upper_dev(struct net_device *dev, 7031 struct net_device *upper_dev) 7032 { 7033 struct 
netdev_nested_priv priv = {
		.flags = 0,
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
					   &priv);
}

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold the RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
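 *
 * The netdev_for_each_lower_dev() helper in netdevice.h wraps this
 * function; a short sketch, with do_something() purely illustrative:
 *
 *	struct list_head *iter;
 *	struct net_device *ldev;
 *
 *	netdev_for_each_lower_dev(dev, ldev, iter)
 *		do_something(ldev);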
7109 */ 7110 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7111 { 7112 struct netdev_adjacent *lower; 7113 7114 lower = list_entry(*iter, struct netdev_adjacent, list); 7115 7116 if (&lower->list == &dev->adj_list.lower) 7117 return NULL; 7118 7119 *iter = lower->list.next; 7120 7121 return lower->dev; 7122 } 7123 EXPORT_SYMBOL(netdev_lower_get_next); 7124 7125 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7126 struct list_head **iter) 7127 { 7128 struct netdev_adjacent *lower; 7129 7130 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7131 7132 if (&lower->list == &dev->adj_list.lower) 7133 return NULL; 7134 7135 *iter = &lower->list; 7136 7137 return lower->dev; 7138 } 7139 7140 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7141 struct list_head **iter, 7142 bool *ignore) 7143 { 7144 struct netdev_adjacent *lower; 7145 7146 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7147 7148 if (&lower->list == &dev->adj_list.lower) 7149 return NULL; 7150 7151 *iter = &lower->list; 7152 *ignore = lower->ignore; 7153 7154 return lower->dev; 7155 } 7156 7157 int netdev_walk_all_lower_dev(struct net_device *dev, 7158 int (*fn)(struct net_device *dev, 7159 struct netdev_nested_priv *priv), 7160 struct netdev_nested_priv *priv) 7161 { 7162 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7163 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7164 int ret, cur = 0; 7165 7166 now = dev; 7167 iter = &dev->adj_list.lower; 7168 7169 while (1) { 7170 if (now != dev) { 7171 ret = fn(now, priv); 7172 if (ret) 7173 return ret; 7174 } 7175 7176 next = NULL; 7177 while (1) { 7178 ldev = netdev_next_lower_dev(now, &iter); 7179 if (!ldev) 7180 break; 7181 7182 next = ldev; 7183 niter = &ldev->adj_list.lower; 7184 dev_stack[cur] = now; 7185 iter_stack[cur++] = iter; 7186 break; 7187 } 7188 7189 if (!next) { 7190 if (!cur) 7191 return 0; 7192 next = dev_stack[--cur]; 7193 niter = iter_stack[cur]; 7194 } 7195 7196 now = next; 7197 iter = niter; 7198 } 7199 7200 return 0; 7201 } 7202 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7203 7204 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7205 int (*fn)(struct net_device *dev, 7206 struct netdev_nested_priv *priv), 7207 struct netdev_nested_priv *priv) 7208 { 7209 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7210 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7211 int ret, cur = 0; 7212 bool ignore; 7213 7214 now = dev; 7215 iter = &dev->adj_list.lower; 7216 7217 while (1) { 7218 if (now != dev) { 7219 ret = fn(now, priv); 7220 if (ret) 7221 return ret; 7222 } 7223 7224 next = NULL; 7225 while (1) { 7226 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7227 if (!ldev) 7228 break; 7229 if (ignore) 7230 continue; 7231 7232 next = ldev; 7233 niter = &ldev->adj_list.lower; 7234 dev_stack[cur] = now; 7235 iter_stack[cur++] = iter; 7236 break; 7237 } 7238 7239 if (!next) { 7240 if (!cur) 7241 return 0; 7242 next = dev_stack[--cur]; 7243 niter = iter_stack[cur]; 7244 } 7245 7246 now = next; 7247 iter = niter; 7248 } 7249 7250 return 0; 7251 } 7252 7253 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7254 struct list_head **iter) 7255 { 7256 struct netdev_adjacent *lower; 7257 7258 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7259 if (&lower->list == &dev->adj_list.lower) 7260 return NULL; 7261 7262 *iter = &lower->list; 7263 7264 
return lower->dev; 7265 } 7266 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7267 7268 static u8 __netdev_upper_depth(struct net_device *dev) 7269 { 7270 struct net_device *udev; 7271 struct list_head *iter; 7272 u8 max_depth = 0; 7273 bool ignore; 7274 7275 for (iter = &dev->adj_list.upper, 7276 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7277 udev; 7278 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7279 if (ignore) 7280 continue; 7281 if (max_depth < udev->upper_level) 7282 max_depth = udev->upper_level; 7283 } 7284 7285 return max_depth; 7286 } 7287 7288 static u8 __netdev_lower_depth(struct net_device *dev) 7289 { 7290 struct net_device *ldev; 7291 struct list_head *iter; 7292 u8 max_depth = 0; 7293 bool ignore; 7294 7295 for (iter = &dev->adj_list.lower, 7296 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7297 ldev; 7298 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7299 if (ignore) 7300 continue; 7301 if (max_depth < ldev->lower_level) 7302 max_depth = ldev->lower_level; 7303 } 7304 7305 return max_depth; 7306 } 7307 7308 static int __netdev_update_upper_level(struct net_device *dev, 7309 struct netdev_nested_priv *__unused) 7310 { 7311 dev->upper_level = __netdev_upper_depth(dev) + 1; 7312 return 0; 7313 } 7314 7315 #ifdef CONFIG_LOCKDEP 7316 static LIST_HEAD(net_unlink_list); 7317 7318 static void net_unlink_todo(struct net_device *dev) 7319 { 7320 if (list_empty(&dev->unlink_list)) 7321 list_add_tail(&dev->unlink_list, &net_unlink_list); 7322 } 7323 #endif 7324 7325 static int __netdev_update_lower_level(struct net_device *dev, 7326 struct netdev_nested_priv *priv) 7327 { 7328 dev->lower_level = __netdev_lower_depth(dev) + 1; 7329 7330 #ifdef CONFIG_LOCKDEP 7331 if (!priv) 7332 return 0; 7333 7334 if (priv->flags & NESTED_SYNC_IMM) 7335 dev->nested_level = dev->lower_level - 1; 7336 if (priv->flags & NESTED_SYNC_TODO) 7337 net_unlink_todo(dev); 7338 #endif 7339 return 0; 7340 } 7341 7342 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7343 int (*fn)(struct net_device *dev, 7344 struct netdev_nested_priv *priv), 7345 struct netdev_nested_priv *priv) 7346 { 7347 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7348 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7349 int ret, cur = 0; 7350 7351 now = dev; 7352 iter = &dev->adj_list.lower; 7353 7354 while (1) { 7355 if (now != dev) { 7356 ret = fn(now, priv); 7357 if (ret) 7358 return ret; 7359 } 7360 7361 next = NULL; 7362 while (1) { 7363 ldev = netdev_next_lower_dev_rcu(now, &iter); 7364 if (!ldev) 7365 break; 7366 7367 next = ldev; 7368 niter = &ldev->adj_list.lower; 7369 dev_stack[cur] = now; 7370 iter_stack[cur++] = iter; 7371 break; 7372 } 7373 7374 if (!next) { 7375 if (!cur) 7376 return 0; 7377 next = dev_stack[--cur]; 7378 niter = iter_stack[cur]; 7379 } 7380 7381 now = next; 7382 iter = niter; 7383 } 7384 7385 return 0; 7386 } 7387 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7388 7389 /** 7390 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7391 * lower neighbour list, RCU 7392 * variant 7393 * @dev: device 7394 * 7395 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7396 * list. The caller must hold RCU read lock. 
7397 */ 7398 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7399 { 7400 struct netdev_adjacent *lower; 7401 7402 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7403 struct netdev_adjacent, list); 7404 if (lower) 7405 return lower->private; 7406 return NULL; 7407 } 7408 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7409 7410 /** 7411 * netdev_master_upper_dev_get_rcu - Get master upper device 7412 * @dev: device 7413 * 7414 * Find a master upper device and return pointer to it or NULL in case 7415 * it's not there. The caller must hold the RCU read lock. 7416 */ 7417 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7418 { 7419 struct netdev_adjacent *upper; 7420 7421 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7422 struct netdev_adjacent, list); 7423 if (upper && likely(upper->master)) 7424 return upper->dev; 7425 return NULL; 7426 } 7427 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7428 7429 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7430 struct net_device *adj_dev, 7431 struct list_head *dev_list) 7432 { 7433 char linkname[IFNAMSIZ+7]; 7434 7435 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7436 "upper_%s" : "lower_%s", adj_dev->name); 7437 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7438 linkname); 7439 } 7440 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7441 char *name, 7442 struct list_head *dev_list) 7443 { 7444 char linkname[IFNAMSIZ+7]; 7445 7446 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7447 "upper_%s" : "lower_%s", name); 7448 sysfs_remove_link(&(dev->dev.kobj), linkname); 7449 } 7450 7451 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7452 struct net_device *adj_dev, 7453 struct list_head *dev_list) 7454 { 7455 return (dev_list == &dev->adj_list.upper || 7456 dev_list == &dev->adj_list.lower) && 7457 net_eq(dev_net(dev), dev_net(adj_dev)); 7458 } 7459 7460 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7461 struct net_device *adj_dev, 7462 struct list_head *dev_list, 7463 void *private, bool master) 7464 { 7465 struct netdev_adjacent *adj; 7466 int ret; 7467 7468 adj = __netdev_find_adj(adj_dev, dev_list); 7469 7470 if (adj) { 7471 adj->ref_nr += 1; 7472 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7473 dev->name, adj_dev->name, adj->ref_nr); 7474 7475 return 0; 7476 } 7477 7478 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7479 if (!adj) 7480 return -ENOMEM; 7481 7482 adj->dev = adj_dev; 7483 adj->master = master; 7484 adj->ref_nr = 1; 7485 adj->private = private; 7486 adj->ignore = false; 7487 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL); 7488 7489 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7490 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7491 7492 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7493 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7494 if (ret) 7495 goto free_adj; 7496 } 7497 7498 /* Ensure that master link is always the first item in list. 
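 * netdev_master_upper_dev_get() relies on this and only inspects the
 * first entry of the upper adjacency list.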
*/ 7499 if (master) { 7500 ret = sysfs_create_link(&(dev->dev.kobj), 7501 &(adj_dev->dev.kobj), "master"); 7502 if (ret) 7503 goto remove_symlinks; 7504 7505 list_add_rcu(&adj->list, dev_list); 7506 } else { 7507 list_add_tail_rcu(&adj->list, dev_list); 7508 } 7509 7510 return 0; 7511 7512 remove_symlinks: 7513 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7514 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7515 free_adj: 7516 netdev_put(adj_dev, &adj->dev_tracker); 7517 kfree(adj); 7518 7519 return ret; 7520 } 7521 7522 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7523 struct net_device *adj_dev, 7524 u16 ref_nr, 7525 struct list_head *dev_list) 7526 { 7527 struct netdev_adjacent *adj; 7528 7529 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7530 dev->name, adj_dev->name, ref_nr); 7531 7532 adj = __netdev_find_adj(adj_dev, dev_list); 7533 7534 if (!adj) { 7535 pr_err("Adjacency does not exist for device %s from %s\n", 7536 dev->name, adj_dev->name); 7537 WARN_ON(1); 7538 return; 7539 } 7540 7541 if (adj->ref_nr > ref_nr) { 7542 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7543 dev->name, adj_dev->name, ref_nr, 7544 adj->ref_nr - ref_nr); 7545 adj->ref_nr -= ref_nr; 7546 return; 7547 } 7548 7549 if (adj->master) 7550 sysfs_remove_link(&(dev->dev.kobj), "master"); 7551 7552 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7553 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7554 7555 list_del_rcu(&adj->list); 7556 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7557 adj_dev->name, dev->name, adj_dev->name); 7558 netdev_put(adj_dev, &adj->dev_tracker); 7559 kfree_rcu(adj, rcu); 7560 } 7561 7562 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7563 struct net_device *upper_dev, 7564 struct list_head *up_list, 7565 struct list_head *down_list, 7566 void *private, bool master) 7567 { 7568 int ret; 7569 7570 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7571 private, master); 7572 if (ret) 7573 return ret; 7574 7575 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7576 private, false); 7577 if (ret) { 7578 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7579 return ret; 7580 } 7581 7582 return 0; 7583 } 7584 7585 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7586 struct net_device *upper_dev, 7587 u16 ref_nr, 7588 struct list_head *up_list, 7589 struct list_head *down_list) 7590 { 7591 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7592 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7593 } 7594 7595 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7596 struct net_device *upper_dev, 7597 void *private, bool master) 7598 { 7599 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7600 &dev->adj_list.upper, 7601 &upper_dev->adj_list.lower, 7602 private, master); 7603 } 7604 7605 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7606 struct net_device *upper_dev) 7607 { 7608 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7609 &dev->adj_list.upper, 7610 &upper_dev->adj_list.lower); 7611 } 7612 7613 static int __netdev_upper_dev_link(struct net_device *dev, 7614 struct net_device *upper_dev, bool master, 7615 void *upper_priv, void *upper_info, 7616 struct netdev_nested_priv *priv, 7617 struct netlink_ext_ack *extack) 7618 { 7619 struct netdev_notifier_changeupper_info changeupper_info = { 7620 .info = { 7621 .dev = dev, 
7622 .extack = extack, 7623 }, 7624 .upper_dev = upper_dev, 7625 .master = master, 7626 .linking = true, 7627 .upper_info = upper_info, 7628 }; 7629 struct net_device *master_dev; 7630 int ret = 0; 7631 7632 ASSERT_RTNL(); 7633 7634 if (dev == upper_dev) 7635 return -EBUSY; 7636 7637 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7638 if (__netdev_has_upper_dev(upper_dev, dev)) 7639 return -EBUSY; 7640 7641 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7642 return -EMLINK; 7643 7644 if (!master) { 7645 if (__netdev_has_upper_dev(dev, upper_dev)) 7646 return -EEXIST; 7647 } else { 7648 master_dev = __netdev_master_upper_dev_get(dev); 7649 if (master_dev) 7650 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7651 } 7652 7653 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7654 &changeupper_info.info); 7655 ret = notifier_to_errno(ret); 7656 if (ret) 7657 return ret; 7658 7659 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7660 master); 7661 if (ret) 7662 return ret; 7663 7664 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7665 &changeupper_info.info); 7666 ret = notifier_to_errno(ret); 7667 if (ret) 7668 goto rollback; 7669 7670 __netdev_update_upper_level(dev, NULL); 7671 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7672 7673 __netdev_update_lower_level(upper_dev, priv); 7674 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7675 priv); 7676 7677 return 0; 7678 7679 rollback: 7680 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7681 7682 return ret; 7683 } 7684 7685 /** 7686 * netdev_upper_dev_link - Add a link to the upper device 7687 * @dev: device 7688 * @upper_dev: new upper device 7689 * @extack: netlink extended ack 7690 * 7691 * Adds a link to device which is upper to this one. The caller must hold 7692 * the RTNL lock. On a failure a negative errno code is returned. 7693 * On success the reference counts are adjusted and the function 7694 * returns zero. 7695 */ 7696 int netdev_upper_dev_link(struct net_device *dev, 7697 struct net_device *upper_dev, 7698 struct netlink_ext_ack *extack) 7699 { 7700 struct netdev_nested_priv priv = { 7701 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7702 .data = NULL, 7703 }; 7704 7705 return __netdev_upper_dev_link(dev, upper_dev, false, 7706 NULL, NULL, &priv, extack); 7707 } 7708 EXPORT_SYMBOL(netdev_upper_dev_link); 7709 7710 /** 7711 * netdev_master_upper_dev_link - Add a master link to the upper device 7712 * @dev: device 7713 * @upper_dev: new upper device 7714 * @upper_priv: upper device private 7715 * @upper_info: upper info to be passed down via notifier 7716 * @extack: netlink extended ack 7717 * 7718 * Adds a link to device which is upper to this one. In this case, only 7719 * one master upper device can be linked, although other non-master devices 7720 * might be linked as well. The caller must hold the RTNL lock. 7721 * On a failure a negative errno code is returned. On success the reference 7722 * counts are adjusted and the function returns zero. 
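 *
 * Illustrative sketch of an enslave path in a master-type driver; the
 * names slave_dev, master_dev and slave_priv are hypothetical:
 *
 *	err = netdev_master_upper_dev_link(slave_dev, master_dev,
 *					   slave_priv, NULL, extack);
 *	if (err)
 *		return err;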
7723 */ 7724 int netdev_master_upper_dev_link(struct net_device *dev, 7725 struct net_device *upper_dev, 7726 void *upper_priv, void *upper_info, 7727 struct netlink_ext_ack *extack) 7728 { 7729 struct netdev_nested_priv priv = { 7730 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7731 .data = NULL, 7732 }; 7733 7734 return __netdev_upper_dev_link(dev, upper_dev, true, 7735 upper_priv, upper_info, &priv, extack); 7736 } 7737 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7738 7739 static void __netdev_upper_dev_unlink(struct net_device *dev, 7740 struct net_device *upper_dev, 7741 struct netdev_nested_priv *priv) 7742 { 7743 struct netdev_notifier_changeupper_info changeupper_info = { 7744 .info = { 7745 .dev = dev, 7746 }, 7747 .upper_dev = upper_dev, 7748 .linking = false, 7749 }; 7750 7751 ASSERT_RTNL(); 7752 7753 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7754 7755 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7756 &changeupper_info.info); 7757 7758 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7759 7760 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7761 &changeupper_info.info); 7762 7763 __netdev_update_upper_level(dev, NULL); 7764 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7765 7766 __netdev_update_lower_level(upper_dev, priv); 7767 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7768 priv); 7769 } 7770 7771 /** 7772 * netdev_upper_dev_unlink - Removes a link to upper device 7773 * @dev: device 7774 * @upper_dev: new upper device 7775 * 7776 * Removes a link to device which is upper to this one. The caller must hold 7777 * the RTNL lock. 7778 */ 7779 void netdev_upper_dev_unlink(struct net_device *dev, 7780 struct net_device *upper_dev) 7781 { 7782 struct netdev_nested_priv priv = { 7783 .flags = NESTED_SYNC_TODO, 7784 .data = NULL, 7785 }; 7786 7787 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 7788 } 7789 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7790 7791 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 7792 struct net_device *lower_dev, 7793 bool val) 7794 { 7795 struct netdev_adjacent *adj; 7796 7797 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 7798 if (adj) 7799 adj->ignore = val; 7800 7801 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 7802 if (adj) 7803 adj->ignore = val; 7804 } 7805 7806 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 7807 struct net_device *lower_dev) 7808 { 7809 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 7810 } 7811 7812 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 7813 struct net_device *lower_dev) 7814 { 7815 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 7816 } 7817 7818 int netdev_adjacent_change_prepare(struct net_device *old_dev, 7819 struct net_device *new_dev, 7820 struct net_device *dev, 7821 struct netlink_ext_ack *extack) 7822 { 7823 struct netdev_nested_priv priv = { 7824 .flags = 0, 7825 .data = NULL, 7826 }; 7827 int err; 7828 7829 if (!new_dev) 7830 return 0; 7831 7832 if (old_dev && new_dev != old_dev) 7833 netdev_adjacent_dev_disable(dev, old_dev); 7834 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 7835 extack); 7836 if (err) { 7837 if (old_dev && new_dev != old_dev) 7838 netdev_adjacent_dev_enable(dev, old_dev); 7839 return err; 7840 } 7841 7842 return 0; 7843 } 7844 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 7845 7846 void netdev_adjacent_change_commit(struct net_device *old_dev, 7847 struct 
net_device *new_dev, 7848 struct net_device *dev) 7849 { 7850 struct netdev_nested_priv priv = { 7851 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7852 .data = NULL, 7853 }; 7854 7855 if (!new_dev || !old_dev) 7856 return; 7857 7858 if (new_dev == old_dev) 7859 return; 7860 7861 netdev_adjacent_dev_enable(dev, old_dev); 7862 __netdev_upper_dev_unlink(old_dev, dev, &priv); 7863 } 7864 EXPORT_SYMBOL(netdev_adjacent_change_commit); 7865 7866 void netdev_adjacent_change_abort(struct net_device *old_dev, 7867 struct net_device *new_dev, 7868 struct net_device *dev) 7869 { 7870 struct netdev_nested_priv priv = { 7871 .flags = 0, 7872 .data = NULL, 7873 }; 7874 7875 if (!new_dev) 7876 return; 7877 7878 if (old_dev && new_dev != old_dev) 7879 netdev_adjacent_dev_enable(dev, old_dev); 7880 7881 __netdev_upper_dev_unlink(new_dev, dev, &priv); 7882 } 7883 EXPORT_SYMBOL(netdev_adjacent_change_abort); 7884 7885 /** 7886 * netdev_bonding_info_change - Dispatch event about slave change 7887 * @dev: device 7888 * @bonding_info: info to dispatch 7889 * 7890 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7891 * The caller must hold the RTNL lock. 7892 */ 7893 void netdev_bonding_info_change(struct net_device *dev, 7894 struct netdev_bonding_info *bonding_info) 7895 { 7896 struct netdev_notifier_bonding_info info = { 7897 .info.dev = dev, 7898 }; 7899 7900 memcpy(&info.bonding_info, bonding_info, 7901 sizeof(struct netdev_bonding_info)); 7902 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7903 &info.info); 7904 } 7905 EXPORT_SYMBOL(netdev_bonding_info_change); 7906 7907 static int netdev_offload_xstats_enable_l3(struct net_device *dev, 7908 struct netlink_ext_ack *extack) 7909 { 7910 struct netdev_notifier_offload_xstats_info info = { 7911 .info.dev = dev, 7912 .info.extack = extack, 7913 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7914 }; 7915 int err; 7916 int rc; 7917 7918 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), 7919 GFP_KERNEL); 7920 if (!dev->offload_xstats_l3) 7921 return -ENOMEM; 7922 7923 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, 7924 NETDEV_OFFLOAD_XSTATS_DISABLE, 7925 &info.info); 7926 err = notifier_to_errno(rc); 7927 if (err) 7928 goto free_stats; 7929 7930 return 0; 7931 7932 free_stats: 7933 kfree(dev->offload_xstats_l3); 7934 dev->offload_xstats_l3 = NULL; 7935 return err; 7936 } 7937 7938 int netdev_offload_xstats_enable(struct net_device *dev, 7939 enum netdev_offload_xstats_type type, 7940 struct netlink_ext_ack *extack) 7941 { 7942 ASSERT_RTNL(); 7943 7944 if (netdev_offload_xstats_enabled(dev, type)) 7945 return -EALREADY; 7946 7947 switch (type) { 7948 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7949 return netdev_offload_xstats_enable_l3(dev, extack); 7950 } 7951 7952 WARN_ON(1); 7953 return -EINVAL; 7954 } 7955 EXPORT_SYMBOL(netdev_offload_xstats_enable); 7956 7957 static void netdev_offload_xstats_disable_l3(struct net_device *dev) 7958 { 7959 struct netdev_notifier_offload_xstats_info info = { 7960 .info.dev = dev, 7961 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7962 }; 7963 7964 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, 7965 &info.info); 7966 kfree(dev->offload_xstats_l3); 7967 dev->offload_xstats_l3 = NULL; 7968 } 7969 7970 int netdev_offload_xstats_disable(struct net_device *dev, 7971 enum netdev_offload_xstats_type type) 7972 { 7973 ASSERT_RTNL(); 7974 7975 if (!netdev_offload_xstats_enabled(dev, type)) 7976 return -EALREADY; 7977 7978 switch (type) { 7979 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7980 
netdev_offload_xstats_disable_l3(dev); 7981 return 0; 7982 } 7983 7984 WARN_ON(1); 7985 return -EINVAL; 7986 } 7987 EXPORT_SYMBOL(netdev_offload_xstats_disable); 7988 7989 static void netdev_offload_xstats_disable_all(struct net_device *dev) 7990 { 7991 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); 7992 } 7993 7994 static struct rtnl_hw_stats64 * 7995 netdev_offload_xstats_get_ptr(const struct net_device *dev, 7996 enum netdev_offload_xstats_type type) 7997 { 7998 switch (type) { 7999 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 8000 return dev->offload_xstats_l3; 8001 } 8002 8003 WARN_ON(1); 8004 return NULL; 8005 } 8006 8007 bool netdev_offload_xstats_enabled(const struct net_device *dev, 8008 enum netdev_offload_xstats_type type) 8009 { 8010 ASSERT_RTNL(); 8011 8012 return netdev_offload_xstats_get_ptr(dev, type); 8013 } 8014 EXPORT_SYMBOL(netdev_offload_xstats_enabled); 8015 8016 struct netdev_notifier_offload_xstats_ru { 8017 bool used; 8018 }; 8019 8020 struct netdev_notifier_offload_xstats_rd { 8021 struct rtnl_hw_stats64 stats; 8022 bool used; 8023 }; 8024 8025 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, 8026 const struct rtnl_hw_stats64 *src) 8027 { 8028 dest->rx_packets += src->rx_packets; 8029 dest->tx_packets += src->tx_packets; 8030 dest->rx_bytes += src->rx_bytes; 8031 dest->tx_bytes += src->tx_bytes; 8032 dest->rx_errors += src->rx_errors; 8033 dest->tx_errors += src->tx_errors; 8034 dest->rx_dropped += src->rx_dropped; 8035 dest->tx_dropped += src->tx_dropped; 8036 dest->multicast += src->multicast; 8037 } 8038 8039 static int netdev_offload_xstats_get_used(struct net_device *dev, 8040 enum netdev_offload_xstats_type type, 8041 bool *p_used, 8042 struct netlink_ext_ack *extack) 8043 { 8044 struct netdev_notifier_offload_xstats_ru report_used = {}; 8045 struct netdev_notifier_offload_xstats_info info = { 8046 .info.dev = dev, 8047 .info.extack = extack, 8048 .type = type, 8049 .report_used = &report_used, 8050 }; 8051 int rc; 8052 8053 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); 8054 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, 8055 &info.info); 8056 *p_used = report_used.used; 8057 return notifier_to_errno(rc); 8058 } 8059 8060 static int netdev_offload_xstats_get_stats(struct net_device *dev, 8061 enum netdev_offload_xstats_type type, 8062 struct rtnl_hw_stats64 *p_stats, 8063 bool *p_used, 8064 struct netlink_ext_ack *extack) 8065 { 8066 struct netdev_notifier_offload_xstats_rd report_delta = {}; 8067 struct netdev_notifier_offload_xstats_info info = { 8068 .info.dev = dev, 8069 .info.extack = extack, 8070 .type = type, 8071 .report_delta = &report_delta, 8072 }; 8073 struct rtnl_hw_stats64 *stats; 8074 int rc; 8075 8076 stats = netdev_offload_xstats_get_ptr(dev, type); 8077 if (WARN_ON(!stats)) 8078 return -EINVAL; 8079 8080 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 8081 &info.info); 8082 8083 /* Cache whatever we got, even if there was an error, otherwise the 8084 * successful stats retrievals would get lost. 
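 * The cached counters keep any partial deltas reported before the
 * failure, so a later successful read still includes them.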
8085 */ 8086 netdev_hw_stats64_add(stats, &report_delta.stats); 8087 8088 if (p_stats) 8089 *p_stats = *stats; 8090 *p_used = report_delta.used; 8091 8092 return notifier_to_errno(rc); 8093 } 8094 8095 int netdev_offload_xstats_get(struct net_device *dev, 8096 enum netdev_offload_xstats_type type, 8097 struct rtnl_hw_stats64 *p_stats, bool *p_used, 8098 struct netlink_ext_ack *extack) 8099 { 8100 ASSERT_RTNL(); 8101 8102 if (p_stats) 8103 return netdev_offload_xstats_get_stats(dev, type, p_stats, 8104 p_used, extack); 8105 else 8106 return netdev_offload_xstats_get_used(dev, type, p_used, 8107 extack); 8108 } 8109 EXPORT_SYMBOL(netdev_offload_xstats_get); 8110 8111 void 8112 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, 8113 const struct rtnl_hw_stats64 *stats) 8114 { 8115 report_delta->used = true; 8116 netdev_hw_stats64_add(&report_delta->stats, stats); 8117 } 8118 EXPORT_SYMBOL(netdev_offload_xstats_report_delta); 8119 8120 void 8121 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) 8122 { 8123 report_used->used = true; 8124 } 8125 EXPORT_SYMBOL(netdev_offload_xstats_report_used); 8126 8127 void netdev_offload_xstats_push_delta(struct net_device *dev, 8128 enum netdev_offload_xstats_type type, 8129 const struct rtnl_hw_stats64 *p_stats) 8130 { 8131 struct rtnl_hw_stats64 *stats; 8132 8133 ASSERT_RTNL(); 8134 8135 stats = netdev_offload_xstats_get_ptr(dev, type); 8136 if (WARN_ON(!stats)) 8137 return; 8138 8139 netdev_hw_stats64_add(stats, p_stats); 8140 } 8141 EXPORT_SYMBOL(netdev_offload_xstats_push_delta); 8142 8143 /** 8144 * netdev_get_xmit_slave - Get the xmit slave of master device 8145 * @dev: device 8146 * @skb: The packet 8147 * @all_slaves: assume all the slaves are active 8148 * 8149 * The reference counters are not incremented so the caller must be 8150 * careful with locks. The caller must hold RCU lock. 8151 * %NULL is returned if no slave is found. 8152 */ 8153 8154 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8155 struct sk_buff *skb, 8156 bool all_slaves) 8157 { 8158 const struct net_device_ops *ops = dev->netdev_ops; 8159 8160 if (!ops->ndo_get_xmit_slave) 8161 return NULL; 8162 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8163 } 8164 EXPORT_SYMBOL(netdev_get_xmit_slave); 8165 8166 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8167 struct sock *sk) 8168 { 8169 const struct net_device_ops *ops = dev->netdev_ops; 8170 8171 if (!ops->ndo_sk_get_lower_dev) 8172 return NULL; 8173 return ops->ndo_sk_get_lower_dev(dev, sk); 8174 } 8175 8176 /** 8177 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8178 * @dev: device 8179 * @sk: the socket 8180 * 8181 * %NULL is returned if no lower device is found. 
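 *
 * A minimal usage sketch (locking and error handling omitted, names
 * illustrative), e.g. resolving the device at the bottom of a bonding
 * stack for a given socket:
 *
 *	lowest = netdev_sk_get_lowest_dev(bond_dev, sk);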
8182 */ 8183 8184 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8185 struct sock *sk) 8186 { 8187 struct net_device *lower; 8188 8189 lower = netdev_sk_get_lower_dev(dev, sk); 8190 while (lower) { 8191 dev = lower; 8192 lower = netdev_sk_get_lower_dev(dev, sk); 8193 } 8194 8195 return dev; 8196 } 8197 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8198 8199 static void netdev_adjacent_add_links(struct net_device *dev) 8200 { 8201 struct netdev_adjacent *iter; 8202 8203 struct net *net = dev_net(dev); 8204 8205 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8206 if (!net_eq(net, dev_net(iter->dev))) 8207 continue; 8208 netdev_adjacent_sysfs_add(iter->dev, dev, 8209 &iter->dev->adj_list.lower); 8210 netdev_adjacent_sysfs_add(dev, iter->dev, 8211 &dev->adj_list.upper); 8212 } 8213 8214 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8215 if (!net_eq(net, dev_net(iter->dev))) 8216 continue; 8217 netdev_adjacent_sysfs_add(iter->dev, dev, 8218 &iter->dev->adj_list.upper); 8219 netdev_adjacent_sysfs_add(dev, iter->dev, 8220 &dev->adj_list.lower); 8221 } 8222 } 8223 8224 static void netdev_adjacent_del_links(struct net_device *dev) 8225 { 8226 struct netdev_adjacent *iter; 8227 8228 struct net *net = dev_net(dev); 8229 8230 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8231 if (!net_eq(net, dev_net(iter->dev))) 8232 continue; 8233 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8234 &iter->dev->adj_list.lower); 8235 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8236 &dev->adj_list.upper); 8237 } 8238 8239 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8240 if (!net_eq(net, dev_net(iter->dev))) 8241 continue; 8242 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8243 &iter->dev->adj_list.upper); 8244 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8245 &dev->adj_list.lower); 8246 } 8247 } 8248 8249 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8250 { 8251 struct netdev_adjacent *iter; 8252 8253 struct net *net = dev_net(dev); 8254 8255 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8256 if (!net_eq(net, dev_net(iter->dev))) 8257 continue; 8258 netdev_adjacent_sysfs_del(iter->dev, oldname, 8259 &iter->dev->adj_list.lower); 8260 netdev_adjacent_sysfs_add(iter->dev, dev, 8261 &iter->dev->adj_list.lower); 8262 } 8263 8264 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8265 if (!net_eq(net, dev_net(iter->dev))) 8266 continue; 8267 netdev_adjacent_sysfs_del(iter->dev, oldname, 8268 &iter->dev->adj_list.upper); 8269 netdev_adjacent_sysfs_add(iter->dev, dev, 8270 &iter->dev->adj_list.upper); 8271 } 8272 } 8273 8274 void *netdev_lower_dev_get_private(struct net_device *dev, 8275 struct net_device *lower_dev) 8276 { 8277 struct netdev_adjacent *lower; 8278 8279 if (!lower_dev) 8280 return NULL; 8281 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8282 if (!lower) 8283 return NULL; 8284 8285 return lower->private; 8286 } 8287 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8288 8289 8290 /** 8291 * netdev_lower_state_changed - Dispatch event about lower device state change 8292 * @lower_dev: device 8293 * @lower_state_info: state to dispatch 8294 * 8295 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8296 * The caller must hold the RTNL lock. 
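 *
 * A minimal sketch of the usual caller, e.g. a bonding/team master
 * pushing new lower state for one of its slaves (values illustrative):
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up = true,
 *		.tx_enabled = true,
 *	};
 *
 *	netdev_lower_state_changed(slave_dev, &info);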
8297 */ 8298 void netdev_lower_state_changed(struct net_device *lower_dev, 8299 void *lower_state_info) 8300 { 8301 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8302 .info.dev = lower_dev, 8303 }; 8304 8305 ASSERT_RTNL(); 8306 changelowerstate_info.lower_state_info = lower_state_info; 8307 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8308 &changelowerstate_info.info); 8309 } 8310 EXPORT_SYMBOL(netdev_lower_state_changed); 8311 8312 static void dev_change_rx_flags(struct net_device *dev, int flags) 8313 { 8314 const struct net_device_ops *ops = dev->netdev_ops; 8315 8316 if (ops->ndo_change_rx_flags) 8317 ops->ndo_change_rx_flags(dev, flags); 8318 } 8319 8320 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8321 { 8322 unsigned int old_flags = dev->flags; 8323 kuid_t uid; 8324 kgid_t gid; 8325 8326 ASSERT_RTNL(); 8327 8328 dev->flags |= IFF_PROMISC; 8329 dev->promiscuity += inc; 8330 if (dev->promiscuity == 0) { 8331 /* 8332 * Avoid overflow. 8333 * If inc causes overflow, untouch promisc and return error. 8334 */ 8335 if (inc < 0) 8336 dev->flags &= ~IFF_PROMISC; 8337 else { 8338 dev->promiscuity -= inc; 8339 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 8340 return -EOVERFLOW; 8341 } 8342 } 8343 if (dev->flags != old_flags) { 8344 netdev_info(dev, "%s promiscuous mode\n", 8345 dev->flags & IFF_PROMISC ? "entered" : "left"); 8346 if (audit_enabled) { 8347 current_uid_gid(&uid, &gid); 8348 audit_log(audit_context(), GFP_ATOMIC, 8349 AUDIT_ANOM_PROMISCUOUS, 8350 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8351 dev->name, (dev->flags & IFF_PROMISC), 8352 (old_flags & IFF_PROMISC), 8353 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8354 from_kuid(&init_user_ns, uid), 8355 from_kgid(&init_user_ns, gid), 8356 audit_get_sessionid(current)); 8357 } 8358 8359 dev_change_rx_flags(dev, IFF_PROMISC); 8360 } 8361 if (notify) 8362 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); 8363 return 0; 8364 } 8365 8366 /** 8367 * dev_set_promiscuity - update promiscuity count on a device 8368 * @dev: device 8369 * @inc: modifier 8370 * 8371 * Add or remove promiscuity from a device. While the count in the device 8372 * remains above zero the interface remains promiscuous. Once it hits zero 8373 * the device reverts back to normal filtering operation. A negative inc 8374 * value is used to drop promiscuity on the device. 8375 * Return 0 if successful or a negative errno code on error. 8376 */ 8377 int dev_set_promiscuity(struct net_device *dev, int inc) 8378 { 8379 unsigned int old_flags = dev->flags; 8380 int err; 8381 8382 err = __dev_set_promiscuity(dev, inc, true); 8383 if (err < 0) 8384 return err; 8385 if (dev->flags != old_flags) 8386 dev_set_rx_mode(dev); 8387 return err; 8388 } 8389 EXPORT_SYMBOL(dev_set_promiscuity); 8390 8391 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8392 { 8393 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8394 8395 ASSERT_RTNL(); 8396 8397 dev->flags |= IFF_ALLMULTI; 8398 dev->allmulti += inc; 8399 if (dev->allmulti == 0) { 8400 /* 8401 * Avoid overflow. 8402 * If inc causes overflow, untouch allmulti and return error. 8403 */ 8404 if (inc < 0) 8405 dev->flags &= ~IFF_ALLMULTI; 8406 else { 8407 dev->allmulti -= inc; 8408 netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); 8409 return -EOVERFLOW; 8410 } 8411 } 8412 if (dev->flags ^ old_flags) { 8413 netdev_info(dev, "%s allmulticast mode\n", 8414 dev->flags & IFF_ALLMULTI ? "entered" : "left"); 8415 dev_change_rx_flags(dev, IFF_ALLMULTI); 8416 dev_set_rx_mode(dev); 8417 if (notify) 8418 __dev_notify_flags(dev, old_flags, 8419 dev->gflags ^ old_gflags, 0, NULL); 8420 } 8421 return 0; 8422 } 8423 8424 /** 8425 * dev_set_allmulti - update allmulti count on a device 8426 * @dev: device 8427 * @inc: modifier 8428 * 8429 * Add or remove reception of all multicast frames to a device. While the 8430 * count in the device remains above zero the interface remains listening 8431 * to all interfaces. Once it hits zero the device reverts back to normal 8432 * filtering operation. A negative @inc value is used to drop the counter 8433 * when releasing a resource needing all multicasts. 8434 * Return 0 if successful or a negative errno code on error. 8435 */ 8436 8437 int dev_set_allmulti(struct net_device *dev, int inc) 8438 { 8439 return __dev_set_allmulti(dev, inc, true); 8440 } 8441 EXPORT_SYMBOL(dev_set_allmulti); 8442 8443 /* 8444 * Upload unicast and multicast address lists to device and 8445 * configure RX filtering. When the device doesn't support unicast 8446 * filtering it is put in promiscuous mode while unicast addresses 8447 * are present. 8448 */ 8449 void __dev_set_rx_mode(struct net_device *dev) 8450 { 8451 const struct net_device_ops *ops = dev->netdev_ops; 8452 8453 /* dev_open will call this function so the list will stay sane. */ 8454 if (!(dev->flags&IFF_UP)) 8455 return; 8456 8457 if (!netif_device_present(dev)) 8458 return; 8459 8460 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8461 /* Unicast addresses changes may only happen under the rtnl, 8462 * therefore calling __dev_set_promiscuity here is safe. 8463 */ 8464 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8465 __dev_set_promiscuity(dev, 1, false); 8466 dev->uc_promisc = true; 8467 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8468 __dev_set_promiscuity(dev, -1, false); 8469 dev->uc_promisc = false; 8470 } 8471 } 8472 8473 if (ops->ndo_set_rx_mode) 8474 ops->ndo_set_rx_mode(dev); 8475 } 8476 8477 void dev_set_rx_mode(struct net_device *dev) 8478 { 8479 netif_addr_lock_bh(dev); 8480 __dev_set_rx_mode(dev); 8481 netif_addr_unlock_bh(dev); 8482 } 8483 8484 /** 8485 * dev_get_flags - get flags reported to userspace 8486 * @dev: device 8487 * 8488 * Get the combination of flag bits exported through APIs to userspace. 8489 */ 8490 unsigned int dev_get_flags(const struct net_device *dev) 8491 { 8492 unsigned int flags; 8493 8494 flags = (dev->flags & ~(IFF_PROMISC | 8495 IFF_ALLMULTI | 8496 IFF_RUNNING | 8497 IFF_LOWER_UP | 8498 IFF_DORMANT)) | 8499 (dev->gflags & (IFF_PROMISC | 8500 IFF_ALLMULTI)); 8501 8502 if (netif_running(dev)) { 8503 if (netif_oper_up(dev)) 8504 flags |= IFF_RUNNING; 8505 if (netif_carrier_ok(dev)) 8506 flags |= IFF_LOWER_UP; 8507 if (netif_dormant(dev)) 8508 flags |= IFF_DORMANT; 8509 } 8510 8511 return flags; 8512 } 8513 EXPORT_SYMBOL(dev_get_flags); 8514 8515 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8516 struct netlink_ext_ack *extack) 8517 { 8518 unsigned int old_flags = dev->flags; 8519 int ret; 8520 8521 ASSERT_RTNL(); 8522 8523 /* 8524 * Set the flags on our device. 
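	 *	Only the directly settable bits are taken from @flags here;
	 *	IFF_UP, IFF_VOLATILE, IFF_PROMISC and IFF_ALLMULTI are
	 *	carried over from the current device state and handled
	 *	separately below.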
8525 */ 8526 8527 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8528 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8529 IFF_AUTOMEDIA)) | 8530 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8531 IFF_ALLMULTI)); 8532 8533 /* 8534 * Load in the correct multicast list now the flags have changed. 8535 */ 8536 8537 if ((old_flags ^ flags) & IFF_MULTICAST) 8538 dev_change_rx_flags(dev, IFF_MULTICAST); 8539 8540 dev_set_rx_mode(dev); 8541 8542 /* 8543 * Have we downed the interface. We handle IFF_UP ourselves 8544 * according to user attempts to set it, rather than blindly 8545 * setting it. 8546 */ 8547 8548 ret = 0; 8549 if ((old_flags ^ flags) & IFF_UP) { 8550 if (old_flags & IFF_UP) 8551 __dev_close(dev); 8552 else 8553 ret = __dev_open(dev, extack); 8554 } 8555 8556 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8557 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8558 unsigned int old_flags = dev->flags; 8559 8560 dev->gflags ^= IFF_PROMISC; 8561 8562 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8563 if (dev->flags != old_flags) 8564 dev_set_rx_mode(dev); 8565 } 8566 8567 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8568 * is important. Some (broken) drivers set IFF_PROMISC, when 8569 * IFF_ALLMULTI is requested not asking us and not reporting. 8570 */ 8571 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8572 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8573 8574 dev->gflags ^= IFF_ALLMULTI; 8575 __dev_set_allmulti(dev, inc, false); 8576 } 8577 8578 return ret; 8579 } 8580 8581 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8582 unsigned int gchanges, u32 portid, 8583 const struct nlmsghdr *nlh) 8584 { 8585 unsigned int changes = dev->flags ^ old_flags; 8586 8587 if (gchanges) 8588 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); 8589 8590 if (changes & IFF_UP) { 8591 if (dev->flags & IFF_UP) 8592 call_netdevice_notifiers(NETDEV_UP, dev); 8593 else 8594 call_netdevice_notifiers(NETDEV_DOWN, dev); 8595 } 8596 8597 if (dev->flags & IFF_UP && 8598 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8599 struct netdev_notifier_change_info change_info = { 8600 .info = { 8601 .dev = dev, 8602 }, 8603 .flags_changed = changes, 8604 }; 8605 8606 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8607 } 8608 } 8609 8610 /** 8611 * dev_change_flags - change device settings 8612 * @dev: device 8613 * @flags: device state flags 8614 * @extack: netlink extended ack 8615 * 8616 * Change settings on device based state flags. The flags are 8617 * in the userspace exported format. 
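 *
 * A minimal sketch (caller holds RTNL, error handling omitted), e.g.
 * bringing an interface up while leaving the other flags untouched:
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	dev_change_flags(dev, flags | IFF_UP, NULL);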
8618 */ 8619 int dev_change_flags(struct net_device *dev, unsigned int flags, 8620 struct netlink_ext_ack *extack) 8621 { 8622 int ret; 8623 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8624 8625 ret = __dev_change_flags(dev, flags, extack); 8626 if (ret < 0) 8627 return ret; 8628 8629 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8630 __dev_notify_flags(dev, old_flags, changes, 0, NULL); 8631 return ret; 8632 } 8633 EXPORT_SYMBOL(dev_change_flags); 8634 8635 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8636 { 8637 const struct net_device_ops *ops = dev->netdev_ops; 8638 8639 if (ops->ndo_change_mtu) 8640 return ops->ndo_change_mtu(dev, new_mtu); 8641 8642 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8643 WRITE_ONCE(dev->mtu, new_mtu); 8644 return 0; 8645 } 8646 EXPORT_SYMBOL(__dev_set_mtu); 8647 8648 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8649 struct netlink_ext_ack *extack) 8650 { 8651 /* MTU must be positive, and in range */ 8652 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8653 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8654 return -EINVAL; 8655 } 8656 8657 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8658 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8659 return -EINVAL; 8660 } 8661 return 0; 8662 } 8663 8664 /** 8665 * dev_set_mtu_ext - Change maximum transfer unit 8666 * @dev: device 8667 * @new_mtu: new transfer unit 8668 * @extack: netlink extended ack 8669 * 8670 * Change the maximum transfer size of the network device. 8671 */ 8672 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8673 struct netlink_ext_ack *extack) 8674 { 8675 int err, orig_mtu; 8676 8677 if (new_mtu == dev->mtu) 8678 return 0; 8679 8680 err = dev_validate_mtu(dev, new_mtu, extack); 8681 if (err) 8682 return err; 8683 8684 if (!netif_device_present(dev)) 8685 return -ENODEV; 8686 8687 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8688 err = notifier_to_errno(err); 8689 if (err) 8690 return err; 8691 8692 orig_mtu = dev->mtu; 8693 err = __dev_set_mtu(dev, new_mtu); 8694 8695 if (!err) { 8696 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8697 orig_mtu); 8698 err = notifier_to_errno(err); 8699 if (err) { 8700 /* setting mtu back and notifying everyone again, 8701 * so that they have a chance to revert changes. 
8702 */ 8703 __dev_set_mtu(dev, orig_mtu); 8704 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8705 new_mtu); 8706 } 8707 } 8708 return err; 8709 } 8710 8711 int dev_set_mtu(struct net_device *dev, int new_mtu) 8712 { 8713 struct netlink_ext_ack extack; 8714 int err; 8715 8716 memset(&extack, 0, sizeof(extack)); 8717 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8718 if (err && extack._msg) 8719 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8720 return err; 8721 } 8722 EXPORT_SYMBOL(dev_set_mtu); 8723 8724 /** 8725 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8726 * @dev: device 8727 * @new_len: new tx queue length 8728 */ 8729 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8730 { 8731 unsigned int orig_len = dev->tx_queue_len; 8732 int res; 8733 8734 if (new_len != (unsigned int)new_len) 8735 return -ERANGE; 8736 8737 if (new_len != orig_len) { 8738 dev->tx_queue_len = new_len; 8739 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8740 res = notifier_to_errno(res); 8741 if (res) 8742 goto err_rollback; 8743 res = dev_qdisc_change_tx_queue_len(dev); 8744 if (res) 8745 goto err_rollback; 8746 } 8747 8748 return 0; 8749 8750 err_rollback: 8751 netdev_err(dev, "refused to change device tx_queue_len\n"); 8752 dev->tx_queue_len = orig_len; 8753 return res; 8754 } 8755 8756 /** 8757 * dev_set_group - Change group this device belongs to 8758 * @dev: device 8759 * @new_group: group this device should belong to 8760 */ 8761 void dev_set_group(struct net_device *dev, int new_group) 8762 { 8763 dev->group = new_group; 8764 } 8765 8766 /** 8767 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8768 * @dev: device 8769 * @addr: new address 8770 * @extack: netlink extended ack 8771 */ 8772 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8773 struct netlink_ext_ack *extack) 8774 { 8775 struct netdev_notifier_pre_changeaddr_info info = { 8776 .info.dev = dev, 8777 .info.extack = extack, 8778 .dev_addr = addr, 8779 }; 8780 int rc; 8781 8782 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8783 return notifier_to_errno(rc); 8784 } 8785 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8786 8787 /** 8788 * dev_set_mac_address - Change Media Access Control Address 8789 * @dev: device 8790 * @sa: new address 8791 * @extack: netlink extended ack 8792 * 8793 * Change the hardware (MAC) address of the device 8794 */ 8795 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8796 struct netlink_ext_ack *extack) 8797 { 8798 const struct net_device_ops *ops = dev->netdev_ops; 8799 int err; 8800 8801 if (!ops->ndo_set_mac_address) 8802 return -EOPNOTSUPP; 8803 if (sa->sa_family != dev->type) 8804 return -EINVAL; 8805 if (!netif_device_present(dev)) 8806 return -ENODEV; 8807 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8808 if (err) 8809 return err; 8810 err = ops->ndo_set_mac_address(dev, sa); 8811 if (err) 8812 return err; 8813 dev->addr_assign_type = NET_ADDR_SET; 8814 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8815 add_device_randomness(dev->dev_addr, dev->addr_len); 8816 return 0; 8817 } 8818 EXPORT_SYMBOL(dev_set_mac_address); 8819 8820 static DECLARE_RWSEM(dev_addr_sem); 8821 8822 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 8823 struct netlink_ext_ack *extack) 8824 { 8825 int ret; 8826 8827 down_write(&dev_addr_sem); 8828 ret = dev_set_mac_address(dev, sa, extack); 8829 up_write(&dev_addr_sem); 8830 return ret; 8831 } 
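/*
 * Editorial sketch (not part of the original file): changing a device's
 * MAC address from process context with RTNL held, as the SIOCSIFHWADDR
 * and rtnetlink paths do.  "new_mac" and "err" are illustrative.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address_user(dev, &sa, NULL);
 */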
8832 EXPORT_SYMBOL(dev_set_mac_address_user); 8833 8834 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 8835 { 8836 size_t size = sizeof(sa->sa_data_min); 8837 struct net_device *dev; 8838 int ret = 0; 8839 8840 down_read(&dev_addr_sem); 8841 rcu_read_lock(); 8842 8843 dev = dev_get_by_name_rcu(net, dev_name); 8844 if (!dev) { 8845 ret = -ENODEV; 8846 goto unlock; 8847 } 8848 if (!dev->addr_len) 8849 memset(sa->sa_data, 0, size); 8850 else 8851 memcpy(sa->sa_data, dev->dev_addr, 8852 min_t(size_t, size, dev->addr_len)); 8853 sa->sa_family = dev->type; 8854 8855 unlock: 8856 rcu_read_unlock(); 8857 up_read(&dev_addr_sem); 8858 return ret; 8859 } 8860 EXPORT_SYMBOL(dev_get_mac_address); 8861 8862 /** 8863 * dev_change_carrier - Change device carrier 8864 * @dev: device 8865 * @new_carrier: new value 8866 * 8867 * Change device carrier 8868 */ 8869 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8870 { 8871 const struct net_device_ops *ops = dev->netdev_ops; 8872 8873 if (!ops->ndo_change_carrier) 8874 return -EOPNOTSUPP; 8875 if (!netif_device_present(dev)) 8876 return -ENODEV; 8877 return ops->ndo_change_carrier(dev, new_carrier); 8878 } 8879 8880 /** 8881 * dev_get_phys_port_id - Get device physical port ID 8882 * @dev: device 8883 * @ppid: port ID 8884 * 8885 * Get device physical port ID 8886 */ 8887 int dev_get_phys_port_id(struct net_device *dev, 8888 struct netdev_phys_item_id *ppid) 8889 { 8890 const struct net_device_ops *ops = dev->netdev_ops; 8891 8892 if (!ops->ndo_get_phys_port_id) 8893 return -EOPNOTSUPP; 8894 return ops->ndo_get_phys_port_id(dev, ppid); 8895 } 8896 8897 /** 8898 * dev_get_phys_port_name - Get device physical port name 8899 * @dev: device 8900 * @name: port name 8901 * @len: limit of bytes to copy to name 8902 * 8903 * Get device physical port name 8904 */ 8905 int dev_get_phys_port_name(struct net_device *dev, 8906 char *name, size_t len) 8907 { 8908 const struct net_device_ops *ops = dev->netdev_ops; 8909 int err; 8910 8911 if (ops->ndo_get_phys_port_name) { 8912 err = ops->ndo_get_phys_port_name(dev, name, len); 8913 if (err != -EOPNOTSUPP) 8914 return err; 8915 } 8916 return devlink_compat_phys_port_name_get(dev, name, len); 8917 } 8918 8919 /** 8920 * dev_get_port_parent_id - Get the device's port parent identifier 8921 * @dev: network device 8922 * @ppid: pointer to a storage for the port's parent identifier 8923 * @recurse: allow/disallow recursion to lower devices 8924 * 8925 * Get the devices's port parent identifier 8926 */ 8927 int dev_get_port_parent_id(struct net_device *dev, 8928 struct netdev_phys_item_id *ppid, 8929 bool recurse) 8930 { 8931 const struct net_device_ops *ops = dev->netdev_ops; 8932 struct netdev_phys_item_id first = { }; 8933 struct net_device *lower_dev; 8934 struct list_head *iter; 8935 int err; 8936 8937 if (ops->ndo_get_port_parent_id) { 8938 err = ops->ndo_get_port_parent_id(dev, ppid); 8939 if (err != -EOPNOTSUPP) 8940 return err; 8941 } 8942 8943 err = devlink_compat_switch_id_get(dev, ppid); 8944 if (!recurse || err != -EOPNOTSUPP) 8945 return err; 8946 8947 netdev_for_each_lower_dev(dev, lower_dev, iter) { 8948 err = dev_get_port_parent_id(lower_dev, ppid, true); 8949 if (err) 8950 break; 8951 if (!first.id_len) 8952 first = *ppid; 8953 else if (memcmp(&first, ppid, sizeof(*ppid))) 8954 return -EOPNOTSUPP; 8955 } 8956 8957 return err; 8958 } 8959 EXPORT_SYMBOL(dev_get_port_parent_id); 8960 8961 /** 8962 * netdev_port_same_parent_id - Indicate if two network devices have 8963 
* the same port parent identifier 8964 * @a: first network device 8965 * @b: second network device 8966 */ 8967 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 8968 { 8969 struct netdev_phys_item_id a_id = { }; 8970 struct netdev_phys_item_id b_id = { }; 8971 8972 if (dev_get_port_parent_id(a, &a_id, true) || 8973 dev_get_port_parent_id(b, &b_id, true)) 8974 return false; 8975 8976 return netdev_phys_item_id_same(&a_id, &b_id); 8977 } 8978 EXPORT_SYMBOL(netdev_port_same_parent_id); 8979 8980 /** 8981 * dev_change_proto_down - set carrier according to proto_down. 8982 * 8983 * @dev: device 8984 * @proto_down: new value 8985 */ 8986 int dev_change_proto_down(struct net_device *dev, bool proto_down) 8987 { 8988 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) 8989 return -EOPNOTSUPP; 8990 if (!netif_device_present(dev)) 8991 return -ENODEV; 8992 if (proto_down) 8993 netif_carrier_off(dev); 8994 else 8995 netif_carrier_on(dev); 8996 dev->proto_down = proto_down; 8997 return 0; 8998 } 8999 9000 /** 9001 * dev_change_proto_down_reason - proto down reason 9002 * 9003 * @dev: device 9004 * @mask: proto down mask 9005 * @value: proto down value 9006 */ 9007 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 9008 u32 value) 9009 { 9010 int b; 9011 9012 if (!mask) { 9013 dev->proto_down_reason = value; 9014 } else { 9015 for_each_set_bit(b, &mask, 32) { 9016 if (value & (1 << b)) 9017 dev->proto_down_reason |= BIT(b); 9018 else 9019 dev->proto_down_reason &= ~BIT(b); 9020 } 9021 } 9022 } 9023 9024 struct bpf_xdp_link { 9025 struct bpf_link link; 9026 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9027 int flags; 9028 }; 9029 9030 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9031 { 9032 if (flags & XDP_FLAGS_HW_MODE) 9033 return XDP_MODE_HW; 9034 if (flags & XDP_FLAGS_DRV_MODE) 9035 return XDP_MODE_DRV; 9036 if (flags & XDP_FLAGS_SKB_MODE) 9037 return XDP_MODE_SKB; 9038 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; 9039 } 9040 9041 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9042 { 9043 switch (mode) { 9044 case XDP_MODE_SKB: 9045 return generic_xdp_install; 9046 case XDP_MODE_DRV: 9047 case XDP_MODE_HW: 9048 return dev->netdev_ops->ndo_bpf; 9049 default: 9050 return NULL; 9051 } 9052 } 9053 9054 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9055 enum bpf_xdp_mode mode) 9056 { 9057 return dev->xdp_state[mode].link; 9058 } 9059 9060 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9061 enum bpf_xdp_mode mode) 9062 { 9063 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9064 9065 if (link) 9066 return link->link.prog; 9067 return dev->xdp_state[mode].prog; 9068 } 9069 9070 u8 dev_xdp_prog_count(struct net_device *dev) 9071 { 9072 u8 count = 0; 9073 int i; 9074 9075 for (i = 0; i < __MAX_XDP_MODE; i++) 9076 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9077 count++; 9078 return count; 9079 } 9080 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9081 9082 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9083 { 9084 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9085 9086 return prog ? 
prog->aux->id : 0; 9087 } 9088 9089 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9090 struct bpf_xdp_link *link) 9091 { 9092 dev->xdp_state[mode].link = link; 9093 dev->xdp_state[mode].prog = NULL; 9094 } 9095 9096 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9097 struct bpf_prog *prog) 9098 { 9099 dev->xdp_state[mode].link = NULL; 9100 dev->xdp_state[mode].prog = prog; 9101 } 9102 9103 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9104 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9105 u32 flags, struct bpf_prog *prog) 9106 { 9107 struct netdev_bpf xdp; 9108 int err; 9109 9110 memset(&xdp, 0, sizeof(xdp)); 9111 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9112 xdp.extack = extack; 9113 xdp.flags = flags; 9114 xdp.prog = prog; 9115 9116 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9117 * "moved" into driver), so they don't increment it on their own, but 9118 * they do decrement refcnt when program is detached or replaced. 9119 * Given net_device also owns link/prog, we need to bump refcnt here 9120 * to prevent drivers from underflowing it. 9121 */ 9122 if (prog) 9123 bpf_prog_inc(prog); 9124 err = bpf_op(dev, &xdp); 9125 if (err) { 9126 if (prog) 9127 bpf_prog_put(prog); 9128 return err; 9129 } 9130 9131 if (mode != XDP_MODE_HW) 9132 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9133 9134 return 0; 9135 } 9136 9137 static void dev_xdp_uninstall(struct net_device *dev) 9138 { 9139 struct bpf_xdp_link *link; 9140 struct bpf_prog *prog; 9141 enum bpf_xdp_mode mode; 9142 bpf_op_t bpf_op; 9143 9144 ASSERT_RTNL(); 9145 9146 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9147 prog = dev_xdp_prog(dev, mode); 9148 if (!prog) 9149 continue; 9150 9151 bpf_op = dev_xdp_bpf_op(dev, mode); 9152 if (!bpf_op) 9153 continue; 9154 9155 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9156 9157 /* auto-detach link from net device */ 9158 link = dev_xdp_link(dev, mode); 9159 if (link) 9160 link->dev = NULL; 9161 else 9162 bpf_prog_put(prog); 9163 9164 dev_xdp_set_link(dev, mode, NULL); 9165 } 9166 } 9167 9168 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9169 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9170 struct bpf_prog *old_prog, u32 flags) 9171 { 9172 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9173 struct bpf_prog *cur_prog; 9174 struct net_device *upper; 9175 struct list_head *iter; 9176 enum bpf_xdp_mode mode; 9177 bpf_op_t bpf_op; 9178 int err; 9179 9180 ASSERT_RTNL(); 9181 9182 /* either link or prog attachment, never both */ 9183 if (link && (new_prog || old_prog)) 9184 return -EINVAL; 9185 /* link supports only XDP mode flags */ 9186 if (link && (flags & ~XDP_FLAGS_MODES)) { 9187 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9188 return -EINVAL; 9189 } 9190 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9191 if (num_modes > 1) { 9192 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9193 return -EINVAL; 9194 } 9195 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9196 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9197 NL_SET_ERR_MSG(extack, 9198 "More than one program loaded, unset mode is ambiguous"); 9199 return -EINVAL; 9200 } 9201 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9202 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9203 NL_SET_ERR_MSG(extack, 
"XDP_FLAGS_REPLACE is not specified"); 9204 return -EINVAL; 9205 } 9206 9207 mode = dev_xdp_mode(dev, flags); 9208 /* can't replace attached link */ 9209 if (dev_xdp_link(dev, mode)) { 9210 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9211 return -EBUSY; 9212 } 9213 9214 /* don't allow if an upper device already has a program */ 9215 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9216 if (dev_xdp_prog_count(upper) > 0) { 9217 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9218 return -EEXIST; 9219 } 9220 } 9221 9222 cur_prog = dev_xdp_prog(dev, mode); 9223 /* can't replace attached prog with link */ 9224 if (link && cur_prog) { 9225 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9226 return -EBUSY; 9227 } 9228 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9229 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9230 return -EEXIST; 9231 } 9232 9233 /* put effective new program into new_prog */ 9234 if (link) 9235 new_prog = link->link.prog; 9236 9237 if (new_prog) { 9238 bool offload = mode == XDP_MODE_HW; 9239 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9240 ? XDP_MODE_DRV : XDP_MODE_SKB; 9241 9242 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9243 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9244 return -EBUSY; 9245 } 9246 if (!offload && dev_xdp_prog(dev, other_mode)) { 9247 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9248 return -EEXIST; 9249 } 9250 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { 9251 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); 9252 return -EINVAL; 9253 } 9254 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { 9255 NL_SET_ERR_MSG(extack, "Program bound to different device"); 9256 return -EINVAL; 9257 } 9258 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9259 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9260 return -EINVAL; 9261 } 9262 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9263 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9264 return -EINVAL; 9265 } 9266 } 9267 9268 /* don't call drivers if the effective program didn't change */ 9269 if (new_prog != cur_prog) { 9270 bpf_op = dev_xdp_bpf_op(dev, mode); 9271 if (!bpf_op) { 9272 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9273 return -EOPNOTSUPP; 9274 } 9275 9276 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9277 if (err) 9278 return err; 9279 } 9280 9281 if (link) 9282 dev_xdp_set_link(dev, mode, link); 9283 else 9284 dev_xdp_set_prog(dev, mode, new_prog); 9285 if (cur_prog) 9286 bpf_prog_put(cur_prog); 9287 9288 return 0; 9289 } 9290 9291 static int dev_xdp_attach_link(struct net_device *dev, 9292 struct netlink_ext_ack *extack, 9293 struct bpf_xdp_link *link) 9294 { 9295 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9296 } 9297 9298 static int dev_xdp_detach_link(struct net_device *dev, 9299 struct netlink_ext_ack *extack, 9300 struct bpf_xdp_link *link) 9301 { 9302 enum bpf_xdp_mode mode; 9303 bpf_op_t bpf_op; 9304 9305 ASSERT_RTNL(); 9306 9307 mode = dev_xdp_mode(dev, link->flags); 9308 if (dev_xdp_link(dev, mode) != link) 9309 return -EINVAL; 9310 9311 bpf_op = dev_xdp_bpf_op(dev, mode); 9312 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9313 dev_xdp_set_link(dev, 
mode, NULL); 9314 return 0; 9315 } 9316 9317 static void bpf_xdp_link_release(struct bpf_link *link) 9318 { 9319 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9320 9321 rtnl_lock(); 9322 9323 /* if racing with net_device's tear down, xdp_link->dev might be 9324 * already NULL, in which case link was already auto-detached 9325 */ 9326 if (xdp_link->dev) { 9327 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9328 xdp_link->dev = NULL; 9329 } 9330 9331 rtnl_unlock(); 9332 } 9333 9334 static int bpf_xdp_link_detach(struct bpf_link *link) 9335 { 9336 bpf_xdp_link_release(link); 9337 return 0; 9338 } 9339 9340 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9341 { 9342 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9343 9344 kfree(xdp_link); 9345 } 9346 9347 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9348 struct seq_file *seq) 9349 { 9350 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9351 u32 ifindex = 0; 9352 9353 rtnl_lock(); 9354 if (xdp_link->dev) 9355 ifindex = xdp_link->dev->ifindex; 9356 rtnl_unlock(); 9357 9358 seq_printf(seq, "ifindex:\t%u\n", ifindex); 9359 } 9360 9361 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9362 struct bpf_link_info *info) 9363 { 9364 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9365 u32 ifindex = 0; 9366 9367 rtnl_lock(); 9368 if (xdp_link->dev) 9369 ifindex = xdp_link->dev->ifindex; 9370 rtnl_unlock(); 9371 9372 info->xdp.ifindex = ifindex; 9373 return 0; 9374 } 9375 9376 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9377 struct bpf_prog *old_prog) 9378 { 9379 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9380 enum bpf_xdp_mode mode; 9381 bpf_op_t bpf_op; 9382 int err = 0; 9383 9384 rtnl_lock(); 9385 9386 /* link might have been auto-released already, so fail */ 9387 if (!xdp_link->dev) { 9388 err = -ENOLINK; 9389 goto out_unlock; 9390 } 9391 9392 if (old_prog && link->prog != old_prog) { 9393 err = -EPERM; 9394 goto out_unlock; 9395 } 9396 old_prog = link->prog; 9397 if (old_prog->type != new_prog->type || 9398 old_prog->expected_attach_type != new_prog->expected_attach_type) { 9399 err = -EINVAL; 9400 goto out_unlock; 9401 } 9402 9403 if (old_prog == new_prog) { 9404 /* no-op, don't disturb drivers */ 9405 bpf_prog_put(new_prog); 9406 goto out_unlock; 9407 } 9408 9409 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9410 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9411 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9412 xdp_link->flags, new_prog); 9413 if (err) 9414 goto out_unlock; 9415 9416 old_prog = xchg(&link->prog, new_prog); 9417 bpf_prog_put(old_prog); 9418 9419 out_unlock: 9420 rtnl_unlock(); 9421 return err; 9422 } 9423 9424 static const struct bpf_link_ops bpf_xdp_link_lops = { 9425 .release = bpf_xdp_link_release, 9426 .dealloc = bpf_xdp_link_dealloc, 9427 .detach = bpf_xdp_link_detach, 9428 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9429 .fill_link_info = bpf_xdp_link_fill_link_info, 9430 .update_prog = bpf_xdp_link_update, 9431 }; 9432 9433 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9434 { 9435 struct net *net = current->nsproxy->net_ns; 9436 struct bpf_link_primer link_primer; 9437 struct bpf_xdp_link *link; 9438 struct net_device *dev; 9439 int err, fd; 9440 9441 rtnl_lock(); 9442 dev = dev_get_by_index(net, 
attr->link_create.target_ifindex); 9443 if (!dev) { 9444 rtnl_unlock(); 9445 return -EINVAL; 9446 } 9447 9448 link = kzalloc(sizeof(*link), GFP_USER); 9449 if (!link) { 9450 err = -ENOMEM; 9451 goto unlock; 9452 } 9453 9454 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9455 link->dev = dev; 9456 link->flags = attr->link_create.flags; 9457 9458 err = bpf_link_prime(&link->link, &link_primer); 9459 if (err) { 9460 kfree(link); 9461 goto unlock; 9462 } 9463 9464 err = dev_xdp_attach_link(dev, NULL, link); 9465 rtnl_unlock(); 9466 9467 if (err) { 9468 link->dev = NULL; 9469 bpf_link_cleanup(&link_primer); 9470 goto out_put_dev; 9471 } 9472 9473 fd = bpf_link_settle(&link_primer); 9474 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9475 dev_put(dev); 9476 return fd; 9477 9478 unlock: 9479 rtnl_unlock(); 9480 9481 out_put_dev: 9482 dev_put(dev); 9483 return err; 9484 } 9485 9486 /** 9487 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9488 * @dev: device 9489 * @extack: netlink extended ack 9490 * @fd: new program fd or negative value to clear 9491 * @expected_fd: old program fd that userspace expects to replace or clear 9492 * @flags: xdp-related flags 9493 * 9494 * Set or clear a bpf program for a device 9495 */ 9496 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9497 int fd, int expected_fd, u32 flags) 9498 { 9499 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9500 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9501 int err; 9502 9503 ASSERT_RTNL(); 9504 9505 if (fd >= 0) { 9506 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9507 mode != XDP_MODE_SKB); 9508 if (IS_ERR(new_prog)) 9509 return PTR_ERR(new_prog); 9510 } 9511 9512 if (expected_fd >= 0) { 9513 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9514 mode != XDP_MODE_SKB); 9515 if (IS_ERR(old_prog)) { 9516 err = PTR_ERR(old_prog); 9517 old_prog = NULL; 9518 goto err_out; 9519 } 9520 } 9521 9522 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9523 9524 err_out: 9525 if (err && new_prog) 9526 bpf_prog_put(new_prog); 9527 if (old_prog) 9528 bpf_prog_put(old_prog); 9529 return err; 9530 } 9531 9532 /** 9533 * dev_new_index - allocate an ifindex 9534 * @net: the applicable net namespace 9535 * 9536 * Returns a suitable unique value for a new device interface 9537 * number. The caller must hold the rtnl semaphore or the 9538 * dev_base_lock to be sure it remains unique. 
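 *
 * The search starts just above net->ifindex, wraps back to 1 on
 * overflow, and the first index not already in use is stored in
 * net->ifindex and returned.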
9539 */ 9540 static int dev_new_index(struct net *net) 9541 { 9542 int ifindex = net->ifindex; 9543 9544 for (;;) { 9545 if (++ifindex <= 0) 9546 ifindex = 1; 9547 if (!__dev_get_by_index(net, ifindex)) 9548 return net->ifindex = ifindex; 9549 } 9550 } 9551 9552 /* Delayed registration/unregisteration */ 9553 LIST_HEAD(net_todo_list); 9554 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9555 9556 static void net_set_todo(struct net_device *dev) 9557 { 9558 list_add_tail(&dev->todo_list, &net_todo_list); 9559 atomic_inc(&dev_net(dev)->dev_unreg_count); 9560 } 9561 9562 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9563 struct net_device *upper, netdev_features_t features) 9564 { 9565 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9566 netdev_features_t feature; 9567 int feature_bit; 9568 9569 for_each_netdev_feature(upper_disables, feature_bit) { 9570 feature = __NETIF_F_BIT(feature_bit); 9571 if (!(upper->wanted_features & feature) 9572 && (features & feature)) { 9573 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9574 &feature, upper->name); 9575 features &= ~feature; 9576 } 9577 } 9578 9579 return features; 9580 } 9581 9582 static void netdev_sync_lower_features(struct net_device *upper, 9583 struct net_device *lower, netdev_features_t features) 9584 { 9585 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9586 netdev_features_t feature; 9587 int feature_bit; 9588 9589 for_each_netdev_feature(upper_disables, feature_bit) { 9590 feature = __NETIF_F_BIT(feature_bit); 9591 if (!(features & feature) && (lower->features & feature)) { 9592 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9593 &feature, lower->name); 9594 lower->wanted_features &= ~feature; 9595 __netdev_update_features(lower); 9596 9597 if (unlikely(lower->features & feature)) 9598 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9599 &feature, lower->name); 9600 else 9601 netdev_features_change(lower); 9602 } 9603 } 9604 } 9605 9606 static netdev_features_t netdev_fix_features(struct net_device *dev, 9607 netdev_features_t features) 9608 { 9609 /* Fix illegal checksum combinations */ 9610 if ((features & NETIF_F_HW_CSUM) && 9611 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9612 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9613 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9614 } 9615 9616 /* TSO requires that SG is present as well. */ 9617 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9618 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9619 features &= ~NETIF_F_ALL_TSO; 9620 } 9621 9622 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9623 !(features & NETIF_F_IP_CSUM)) { 9624 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9625 features &= ~NETIF_F_TSO; 9626 features &= ~NETIF_F_TSO_ECN; 9627 } 9628 9629 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9630 !(features & NETIF_F_IPV6_CSUM)) { 9631 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9632 features &= ~NETIF_F_TSO6; 9633 } 9634 9635 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9636 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9637 features &= ~NETIF_F_TSO_MANGLEID; 9638 9639 /* TSO ECN requires that TSO is present as well. */ 9640 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9641 features &= ~NETIF_F_TSO_ECN; 9642 9643 /* Software GSO depends on SG. 
*/ 9644 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9645 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9646 features &= ~NETIF_F_GSO; 9647 } 9648 9649 /* GSO partial features require GSO partial be set */ 9650 if ((features & dev->gso_partial_features) && 9651 !(features & NETIF_F_GSO_PARTIAL)) { 9652 netdev_dbg(dev, 9653 "Dropping partially supported GSO features since no GSO partial.\n"); 9654 features &= ~dev->gso_partial_features; 9655 } 9656 9657 if (!(features & NETIF_F_RXCSUM)) { 9658 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9659 * successfully merged by hardware must also have the 9660 * checksum verified by hardware. If the user does not 9661 * want to enable RXCSUM, logically, we should disable GRO_HW. 9662 */ 9663 if (features & NETIF_F_GRO_HW) { 9664 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9665 features &= ~NETIF_F_GRO_HW; 9666 } 9667 } 9668 9669 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9670 if (features & NETIF_F_RXFCS) { 9671 if (features & NETIF_F_LRO) { 9672 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9673 features &= ~NETIF_F_LRO; 9674 } 9675 9676 if (features & NETIF_F_GRO_HW) { 9677 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9678 features &= ~NETIF_F_GRO_HW; 9679 } 9680 } 9681 9682 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 9683 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 9684 features &= ~NETIF_F_LRO; 9685 } 9686 9687 if (features & NETIF_F_HW_TLS_TX) { 9688 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9689 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9690 bool hw_csum = features & NETIF_F_HW_CSUM; 9691 9692 if (!ip_csum && !hw_csum) { 9693 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9694 features &= ~NETIF_F_HW_TLS_TX; 9695 } 9696 } 9697 9698 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9699 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9700 features &= ~NETIF_F_HW_TLS_RX; 9701 } 9702 9703 return features; 9704 } 9705 9706 int __netdev_update_features(struct net_device *dev) 9707 { 9708 struct net_device *upper, *lower; 9709 netdev_features_t features; 9710 struct list_head *iter; 9711 int err = -1; 9712 9713 ASSERT_RTNL(); 9714 9715 features = netdev_get_wanted_features(dev); 9716 9717 if (dev->netdev_ops->ndo_fix_features) 9718 features = dev->netdev_ops->ndo_fix_features(dev, features); 9719 9720 /* driver might be less strict about feature dependencies */ 9721 features = netdev_fix_features(dev, features); 9722 9723 /* some features can't be enabled if they're off on an upper device */ 9724 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9725 features = netdev_sync_upper_features(dev, upper, features); 9726 9727 if (dev->features == features) 9728 goto sync_lower; 9729 9730 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9731 &dev->features, &features); 9732 9733 if (dev->netdev_ops->ndo_set_features) 9734 err = dev->netdev_ops->ndo_set_features(dev, features); 9735 else 9736 err = 0; 9737 9738 if (unlikely(err < 0)) { 9739 netdev_err(dev, 9740 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9741 err, &features, &dev->features); 9742 /* return non-0 since some features might have changed and 9743 * it's better to fire a spurious notification than miss it 9744 */ 9745 return -1; 9746 } 9747 9748 sync_lower: 9749 /* some features must be disabled 
on lower devices when disabled 9750 * on an upper device (think: bonding master or bridge) 9751 */ 9752 netdev_for_each_lower_dev(dev, lower, iter) 9753 netdev_sync_lower_features(dev, lower, features); 9754 9755 if (!err) { 9756 netdev_features_t diff = features ^ dev->features; 9757 9758 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9759 /* udp_tunnel_{get,drop}_rx_info both need 9760 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9761 * device, or they won't do anything. 9762 * Thus we need to update dev->features 9763 * *before* calling udp_tunnel_get_rx_info, 9764 * but *after* calling udp_tunnel_drop_rx_info. 9765 */ 9766 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9767 dev->features = features; 9768 udp_tunnel_get_rx_info(dev); 9769 } else { 9770 udp_tunnel_drop_rx_info(dev); 9771 } 9772 } 9773 9774 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9775 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9776 dev->features = features; 9777 err |= vlan_get_rx_ctag_filter_info(dev); 9778 } else { 9779 vlan_drop_rx_ctag_filter_info(dev); 9780 } 9781 } 9782 9783 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 9784 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 9785 dev->features = features; 9786 err |= vlan_get_rx_stag_filter_info(dev); 9787 } else { 9788 vlan_drop_rx_stag_filter_info(dev); 9789 } 9790 } 9791 9792 dev->features = features; 9793 } 9794 9795 return err < 0 ? 0 : 1; 9796 } 9797 9798 /** 9799 * netdev_update_features - recalculate device features 9800 * @dev: the device to check 9801 * 9802 * Recalculate dev->features set and send notifications if it 9803 * has changed. Should be called after driver or hardware dependent 9804 * conditions might have changed that influence the features. 9805 */ 9806 void netdev_update_features(struct net_device *dev) 9807 { 9808 if (__netdev_update_features(dev)) 9809 netdev_features_change(dev); 9810 } 9811 EXPORT_SYMBOL(netdev_update_features); 9812 9813 /** 9814 * netdev_change_features - recalculate device features 9815 * @dev: the device to check 9816 * 9817 * Recalculate dev->features set and send notifications even 9818 * if they have not changed. Should be called instead of 9819 * netdev_update_features() if also dev->vlan_features might 9820 * have changed to allow the changes to be propagated to stacked 9821 * VLAN devices. 9822 */ 9823 void netdev_change_features(struct net_device *dev) 9824 { 9825 __netdev_update_features(dev); 9826 netdev_features_change(dev); 9827 } 9828 EXPORT_SYMBOL(netdev_change_features); 9829 9830 /** 9831 * netif_stacked_transfer_operstate - transfer operstate 9832 * @rootdev: the root or lower level device to transfer state from 9833 * @dev: the device to transfer operstate to 9834 * 9835 * Transfer operational state from root to device. This is normally 9836 * called when a stacking relationship exists between the root 9837 * device and the device(a leaf device). 
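 *
 * A minimal sketch of the usual call site, e.g. a VLAN or macvlan
 * NETDEV_CHANGE handler propagating its lower device's state (names
 * illustrative):
 *
 *	netif_stacked_transfer_operstate(lower_dev, stacked_dev);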
9838 */ 9839 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9840 struct net_device *dev) 9841 { 9842 if (rootdev->operstate == IF_OPER_DORMANT) 9843 netif_dormant_on(dev); 9844 else 9845 netif_dormant_off(dev); 9846 9847 if (rootdev->operstate == IF_OPER_TESTING) 9848 netif_testing_on(dev); 9849 else 9850 netif_testing_off(dev); 9851 9852 if (netif_carrier_ok(rootdev)) 9853 netif_carrier_on(dev); 9854 else 9855 netif_carrier_off(dev); 9856 } 9857 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9858 9859 static int netif_alloc_rx_queues(struct net_device *dev) 9860 { 9861 unsigned int i, count = dev->num_rx_queues; 9862 struct netdev_rx_queue *rx; 9863 size_t sz = count * sizeof(*rx); 9864 int err = 0; 9865 9866 BUG_ON(count < 1); 9867 9868 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9869 if (!rx) 9870 return -ENOMEM; 9871 9872 dev->_rx = rx; 9873 9874 for (i = 0; i < count; i++) { 9875 rx[i].dev = dev; 9876 9877 /* XDP RX-queue setup */ 9878 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 9879 if (err < 0) 9880 goto err_rxq_info; 9881 } 9882 return 0; 9883 9884 err_rxq_info: 9885 /* Rollback successful reg's and free other resources */ 9886 while (i--) 9887 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9888 kvfree(dev->_rx); 9889 dev->_rx = NULL; 9890 return err; 9891 } 9892 9893 static void netif_free_rx_queues(struct net_device *dev) 9894 { 9895 unsigned int i, count = dev->num_rx_queues; 9896 9897 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9898 if (!dev->_rx) 9899 return; 9900 9901 for (i = 0; i < count; i++) 9902 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9903 9904 kvfree(dev->_rx); 9905 } 9906 9907 static void netdev_init_one_queue(struct net_device *dev, 9908 struct netdev_queue *queue, void *_unused) 9909 { 9910 /* Initialize queue lock */ 9911 spin_lock_init(&queue->_xmit_lock); 9912 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 9913 queue->xmit_lock_owner = -1; 9914 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 9915 queue->dev = dev; 9916 #ifdef CONFIG_BQL 9917 dql_init(&queue->dql, HZ); 9918 #endif 9919 } 9920 9921 static void netif_free_tx_queues(struct net_device *dev) 9922 { 9923 kvfree(dev->_tx); 9924 } 9925 9926 static int netif_alloc_netdev_queues(struct net_device *dev) 9927 { 9928 unsigned int count = dev->num_tx_queues; 9929 struct netdev_queue *tx; 9930 size_t sz = count * sizeof(*tx); 9931 9932 if (count < 1 || count > 0xffff) 9933 return -EINVAL; 9934 9935 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9936 if (!tx) 9937 return -ENOMEM; 9938 9939 dev->_tx = tx; 9940 9941 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 9942 spin_lock_init(&dev->tx_global_lock); 9943 9944 return 0; 9945 } 9946 9947 void netif_tx_stop_all_queues(struct net_device *dev) 9948 { 9949 unsigned int i; 9950 9951 for (i = 0; i < dev->num_tx_queues; i++) { 9952 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 9953 9954 netif_tx_stop_queue(txq); 9955 } 9956 } 9957 EXPORT_SYMBOL(netif_tx_stop_all_queues); 9958 9959 /** 9960 * register_netdevice() - register a network device 9961 * @dev: device to register 9962 * 9963 * Take a prepared network device structure and make it externally accessible. 9964 * A %NETDEV_REGISTER message is sent to the netdev notifier chain. 9965 * Callers must hold the rtnl lock - you may want register_netdev() 9966 * instead of this. 
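 *
 * A minimal sketch of direct use (this is essentially what
 * register_netdev() does, with error handling trimmed):
 *
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();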
9967 */ 9968 int register_netdevice(struct net_device *dev) 9969 { 9970 int ret; 9971 struct net *net = dev_net(dev); 9972 9973 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 9974 NETDEV_FEATURE_COUNT); 9975 BUG_ON(dev_boot_phase); 9976 ASSERT_RTNL(); 9977 9978 might_sleep(); 9979 9980 /* When net_device's are persistent, this will be fatal. */ 9981 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 9982 BUG_ON(!net); 9983 9984 ret = ethtool_check_ops(dev->ethtool_ops); 9985 if (ret) 9986 return ret; 9987 9988 spin_lock_init(&dev->addr_list_lock); 9989 netdev_set_addr_lockdep_class(dev); 9990 9991 ret = dev_get_valid_name(net, dev, dev->name); 9992 if (ret < 0) 9993 goto out; 9994 9995 ret = -ENOMEM; 9996 dev->name_node = netdev_name_node_head_alloc(dev); 9997 if (!dev->name_node) 9998 goto out; 9999 10000 /* Init, if this function is available */ 10001 if (dev->netdev_ops->ndo_init) { 10002 ret = dev->netdev_ops->ndo_init(dev); 10003 if (ret) { 10004 if (ret > 0) 10005 ret = -EIO; 10006 goto err_free_name; 10007 } 10008 } 10009 10010 if (((dev->hw_features | dev->features) & 10011 NETIF_F_HW_VLAN_CTAG_FILTER) && 10012 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10013 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10014 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10015 ret = -EINVAL; 10016 goto err_uninit; 10017 } 10018 10019 ret = -EBUSY; 10020 if (!dev->ifindex) 10021 dev->ifindex = dev_new_index(net); 10022 else if (__dev_get_by_index(net, dev->ifindex)) 10023 goto err_uninit; 10024 10025 /* Transfer changeable features to wanted_features and enable 10026 * software offloads (GSO and GRO). 10027 */ 10028 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10029 dev->features |= NETIF_F_SOFT_FEATURES; 10030 10031 if (dev->udp_tunnel_nic_info) { 10032 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10033 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10034 } 10035 10036 dev->wanted_features = dev->features & dev->hw_features; 10037 10038 if (!(dev->flags & IFF_LOOPBACK)) 10039 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10040 10041 /* If IPv4 TCP segmentation offload is supported we should also 10042 * allow the device to enable segmenting the frame with the option 10043 * of ignoring a static IP ID value. This doesn't enable the 10044 * feature itself but allows the user to enable it later. 10045 */ 10046 if (dev->hw_features & NETIF_F_TSO) 10047 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10048 if (dev->vlan_features & NETIF_F_TSO) 10049 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10050 if (dev->mpls_features & NETIF_F_TSO) 10051 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10052 if (dev->hw_enc_features & NETIF_F_TSO) 10053 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10054 10055 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10056 */ 10057 dev->vlan_features |= NETIF_F_HIGHDMA; 10058 10059 /* Make NETIF_F_SG inheritable to tunnel devices. 10060 */ 10061 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10062 10063 /* Make NETIF_F_SG inheritable to MPLS. 10064 */ 10065 dev->mpls_features |= NETIF_F_SG; 10066 10067 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 10068 ret = notifier_to_errno(ret); 10069 if (ret) 10070 goto err_uninit; 10071 10072 ret = netdev_register_kobject(dev); 10073 write_lock(&dev_base_lock); 10074 dev->reg_state = ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED;
10075 write_unlock(&dev_base_lock);
10076 if (ret)
10077 goto err_uninit_notify;
10078
10079 __netdev_update_features(dev);
10080
10081 /*
10082 * Default initial state at registration is that the
10083 * device is present.
10084 */
10085
10086 set_bit(__LINK_STATE_PRESENT, &dev->state);
10087
10088 linkwatch_init_dev(dev);
10089
10090 dev_init_scheduler(dev);
10091
10092 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10093 list_netdevice(dev);
10094
10095 add_device_randomness(dev->dev_addr, dev->addr_len);
10096
10097 /* If the device has a permanent device address, the driver should
10098 * set dev_addr and also addr_assign_type should be set to
10099 * NET_ADDR_PERM (default value).
10100 */
10101 if (dev->addr_assign_type == NET_ADDR_PERM)
10102 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10103
10104 /* Notify protocols that a new device appeared. */
10105 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10106 ret = notifier_to_errno(ret);
10107 if (ret) {
10108 /* Expect explicit free_netdev() on failure */
10109 dev->needs_free_netdev = false;
10110 unregister_netdevice_queue(dev, NULL);
10111 goto out;
10112 }
10113 /*
10114 * Prevent userspace races by waiting until the network
10115 * device is fully set up before sending notifications.
10116 */
10117 if (!dev->rtnl_link_ops ||
10118 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10119 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10120
10121 out:
10122 return ret;
10123
10124 err_uninit_notify:
10125 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10126 err_uninit:
10127 if (dev->netdev_ops->ndo_uninit)
10128 dev->netdev_ops->ndo_uninit(dev);
10129 if (dev->priv_destructor)
10130 dev->priv_destructor(dev);
10131 err_free_name:
10132 netdev_name_node_free(dev->name_node);
10133 goto out;
10134 }
10135 EXPORT_SYMBOL(register_netdevice);
10136
10137 /**
10138 * init_dummy_netdev - init a dummy network device for NAPI
10139 * @dev: device to init
10140 *
10141 * This takes a network device structure and initializes the minimum
10142 * number of fields so it can be used to schedule NAPI polls without
10143 * registering a full blown interface. This is to be used by drivers
10144 * that need to tie several hardware interfaces to a single NAPI
10145 * poll scheduler due to HW limitations.
10146 */
10147 int init_dummy_netdev(struct net_device *dev)
10148 {
10149 /* Clear everything. Note we don't initialize spinlocks
10150 * as they aren't supposed to be taken by any of the
10151 * NAPI code and this dummy netdev is supposed to be
10152 * only ever used for NAPI polls.
10153 */
10154 memset(dev, 0, sizeof(struct net_device));
10155
10156 /* make sure we BUG if trying to hit standard
10157 * register/unregister code path
10158 */
10159 dev->reg_state = NETREG_DUMMY;
10160
10161 /* NAPI wants this */
10162 INIT_LIST_HEAD(&dev->napi_list);
10163
10164 /* a dummy interface is started by default */
10165 set_bit(__LINK_STATE_PRESENT, &dev->state);
10166 set_bit(__LINK_STATE_START, &dev->state);
10167
10168 /* napi_busy_loop stats accounting wants this */
10169 dev_net_set(dev, &init_net);
10170
10171 /* Note: We don't allocate pcpu_refcnt for dummy devices,
10172 * because users of this 'device' don't need to change
10173 * its refcount.
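 *
 * A hedged sketch of the usual consumer of init_dummy_netdev() (the
 * "hypo_*" names are made up; the point is only that the dummy netdev
 * exists to anchor NAPI contexts for hardware that has no netdev of
 * its own):
 *
 *	static struct net_device hypo_dummy_dev;
 *	static struct napi_struct hypo_napi;
 *
 *	init_dummy_netdev(&hypo_dummy_dev);
 *	netif_napi_add(&hypo_dummy_dev, &hypo_napi, hypo_poll);
 *	napi_enable(&hypo_napi);
 *
 * after which hypo_poll() is driven by napi_schedule() and
 * napi_complete_done() exactly as for a fully registered interface.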
10174 */ 10175 10176 return 0; 10177 } 10178 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10179 10180 10181 /** 10182 * register_netdev - register a network device 10183 * @dev: device to register 10184 * 10185 * Take a completed network device structure and add it to the kernel 10186 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10187 * chain. 0 is returned on success. A negative errno code is returned 10188 * on a failure to set up the device, or if the name is a duplicate. 10189 * 10190 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10191 * and expands the device name if you passed a format string to 10192 * alloc_netdev. 10193 */ 10194 int register_netdev(struct net_device *dev) 10195 { 10196 int err; 10197 10198 if (rtnl_lock_killable()) 10199 return -EINTR; 10200 err = register_netdevice(dev); 10201 rtnl_unlock(); 10202 return err; 10203 } 10204 EXPORT_SYMBOL(register_netdev); 10205 10206 int netdev_refcnt_read(const struct net_device *dev) 10207 { 10208 #ifdef CONFIG_PCPU_DEV_REFCNT 10209 int i, refcnt = 0; 10210 10211 for_each_possible_cpu(i) 10212 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10213 return refcnt; 10214 #else 10215 return refcount_read(&dev->dev_refcnt); 10216 #endif 10217 } 10218 EXPORT_SYMBOL(netdev_refcnt_read); 10219 10220 int netdev_unregister_timeout_secs __read_mostly = 10; 10221 10222 #define WAIT_REFS_MIN_MSECS 1 10223 #define WAIT_REFS_MAX_MSECS 250 10224 /** 10225 * netdev_wait_allrefs_any - wait until all references are gone. 10226 * @list: list of net_devices to wait on 10227 * 10228 * This is called when unregistering network devices. 10229 * 10230 * Any protocol or device that holds a reference should register 10231 * for netdevice notification, and cleanup and put back the 10232 * reference if they receive an UNREGISTER event. 10233 * We can get stuck here if buggy protocols don't correctly 10234 * call dev_put. 10235 */ 10236 static struct net_device *netdev_wait_allrefs_any(struct list_head *list) 10237 { 10238 unsigned long rebroadcast_time, warning_time; 10239 struct net_device *dev; 10240 int wait = 0; 10241 10242 rebroadcast_time = warning_time = jiffies; 10243 10244 list_for_each_entry(dev, list, todo_list) 10245 if (netdev_refcnt_read(dev) == 1) 10246 return dev; 10247 10248 while (true) { 10249 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10250 rtnl_lock(); 10251 10252 /* Rebroadcast unregister notification */ 10253 list_for_each_entry(dev, list, todo_list) 10254 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10255 10256 __rtnl_unlock(); 10257 rcu_barrier(); 10258 rtnl_lock(); 10259 10260 list_for_each_entry(dev, list, todo_list) 10261 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10262 &dev->state)) { 10263 /* We must not have linkwatch events 10264 * pending on unregister. If this 10265 * happens, we simply run the queue 10266 * unscheduled, resulting in a noop 10267 * for this device. 
10268 */ 10269 linkwatch_run_queue(); 10270 break; 10271 } 10272 10273 __rtnl_unlock(); 10274 10275 rebroadcast_time = jiffies; 10276 } 10277 10278 if (!wait) { 10279 rcu_barrier(); 10280 wait = WAIT_REFS_MIN_MSECS; 10281 } else { 10282 msleep(wait); 10283 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10284 } 10285 10286 list_for_each_entry(dev, list, todo_list) 10287 if (netdev_refcnt_read(dev) == 1) 10288 return dev; 10289 10290 if (time_after(jiffies, warning_time + 10291 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { 10292 list_for_each_entry(dev, list, todo_list) { 10293 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10294 dev->name, netdev_refcnt_read(dev)); 10295 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 10296 } 10297 10298 warning_time = jiffies; 10299 } 10300 } 10301 } 10302 10303 /* The sequence is: 10304 * 10305 * rtnl_lock(); 10306 * ... 10307 * register_netdevice(x1); 10308 * register_netdevice(x2); 10309 * ... 10310 * unregister_netdevice(y1); 10311 * unregister_netdevice(y2); 10312 * ... 10313 * rtnl_unlock(); 10314 * free_netdev(y1); 10315 * free_netdev(y2); 10316 * 10317 * We are invoked by rtnl_unlock(). 10318 * This allows us to deal with problems: 10319 * 1) We can delete sysfs objects which invoke hotplug 10320 * without deadlocking with linkwatch via keventd. 10321 * 2) Since we run with the RTNL semaphore not held, we can sleep 10322 * safely in order to wait for the netdev refcnt to drop to zero. 10323 * 10324 * We must not return until all unregister events added during 10325 * the interval the lock was held have been completed. 10326 */ 10327 void netdev_run_todo(void) 10328 { 10329 struct net_device *dev, *tmp; 10330 struct list_head list; 10331 #ifdef CONFIG_LOCKDEP 10332 struct list_head unlink_list; 10333 10334 list_replace_init(&net_unlink_list, &unlink_list); 10335 10336 while (!list_empty(&unlink_list)) { 10337 struct net_device *dev = list_first_entry(&unlink_list, 10338 struct net_device, 10339 unlink_list); 10340 list_del_init(&dev->unlink_list); 10341 dev->nested_level = dev->lower_level - 1; 10342 } 10343 #endif 10344 10345 /* Snapshot list, allow later requests */ 10346 list_replace_init(&net_todo_list, &list); 10347 10348 __rtnl_unlock(); 10349 10350 /* Wait for rcu callbacks to finish before next phase */ 10351 if (!list_empty(&list)) 10352 rcu_barrier(); 10353 10354 list_for_each_entry_safe(dev, tmp, &list, todo_list) { 10355 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10356 netdev_WARN(dev, "run_todo but not unregistering\n"); 10357 list_del(&dev->todo_list); 10358 continue; 10359 } 10360 10361 write_lock(&dev_base_lock); 10362 dev->reg_state = NETREG_UNREGISTERED; 10363 write_unlock(&dev_base_lock); 10364 linkwatch_forget_dev(dev); 10365 } 10366 10367 while (!list_empty(&list)) { 10368 dev = netdev_wait_allrefs_any(&list); 10369 list_del(&dev->todo_list); 10370 10371 /* paranoia */ 10372 BUG_ON(netdev_refcnt_read(dev) != 1); 10373 BUG_ON(!list_empty(&dev->ptype_all)); 10374 BUG_ON(!list_empty(&dev->ptype_specific)); 10375 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10376 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10377 10378 if (dev->priv_destructor) 10379 dev->priv_destructor(dev); 10380 if (dev->needs_free_netdev) 10381 free_netdev(dev); 10382 10383 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count)) 10384 wake_up(&netdev_unregistering_wq); 10385 10386 /* Free network device */ 10387 kobject_put(&dev->dev.kobj); 10388 } 10389 } 10390 10391 /* Convert net_device_stats to 
rtnl_link_stats64. rtnl_link_stats64 has 10392 * all the same fields in the same order as net_device_stats, with only 10393 * the type differing, but rtnl_link_stats64 may have additional fields 10394 * at the end for newer counters. 10395 */ 10396 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10397 const struct net_device_stats *netdev_stats) 10398 { 10399 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); 10400 const atomic_long_t *src = (atomic_long_t *)netdev_stats; 10401 u64 *dst = (u64 *)stats64; 10402 10403 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10404 for (i = 0; i < n; i++) 10405 dst[i] = (unsigned long)atomic_long_read(&src[i]); 10406 /* zero out counters that only exist in rtnl_link_stats64 */ 10407 memset((char *)stats64 + n * sizeof(u64), 0, 10408 sizeof(*stats64) - n * sizeof(u64)); 10409 } 10410 EXPORT_SYMBOL(netdev_stats_to_stats64); 10411 10412 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev) 10413 { 10414 struct net_device_core_stats __percpu *p; 10415 10416 p = alloc_percpu_gfp(struct net_device_core_stats, 10417 GFP_ATOMIC | __GFP_NOWARN); 10418 10419 if (p && cmpxchg(&dev->core_stats, NULL, p)) 10420 free_percpu(p); 10421 10422 /* This READ_ONCE() pairs with the cmpxchg() above */ 10423 return READ_ONCE(dev->core_stats); 10424 } 10425 EXPORT_SYMBOL(netdev_core_stats_alloc); 10426 10427 /** 10428 * dev_get_stats - get network device statistics 10429 * @dev: device to get statistics from 10430 * @storage: place to store stats 10431 * 10432 * Get network statistics from device. Return @storage. 10433 * The device driver may provide its own method by setting 10434 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10435 * otherwise the internal statistics structure is used. 10436 */ 10437 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10438 struct rtnl_link_stats64 *storage) 10439 { 10440 const struct net_device_ops *ops = dev->netdev_ops; 10441 const struct net_device_core_stats __percpu *p; 10442 10443 if (ops->ndo_get_stats64) { 10444 memset(storage, 0, sizeof(*storage)); 10445 ops->ndo_get_stats64(dev, storage); 10446 } else if (ops->ndo_get_stats) { 10447 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10448 } else { 10449 netdev_stats_to_stats64(storage, &dev->stats); 10450 } 10451 10452 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 10453 p = READ_ONCE(dev->core_stats); 10454 if (p) { 10455 const struct net_device_core_stats *core_stats; 10456 int i; 10457 10458 for_each_possible_cpu(i) { 10459 core_stats = per_cpu_ptr(p, i); 10460 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); 10461 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); 10462 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); 10463 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); 10464 } 10465 } 10466 return storage; 10467 } 10468 EXPORT_SYMBOL(dev_get_stats); 10469 10470 /** 10471 * dev_fetch_sw_netstats - get per-cpu network device statistics 10472 * @s: place to store stats 10473 * @netstats: per-cpu network stats to read from 10474 * 10475 * Read per-cpu network statistics and populate the related fields in @s. 
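 *
 * Example (a sketch; the datapath shown is an assumption, not code from
 * this file): a driver keeping a struct pcpu_sw_netstats per CPU - e.g.
 * allocated with netdev_alloc_pcpu_stats() and stored in dev->tstats -
 * updates it from its receive path with the u64_stats helpers:
 *
 *	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 *
 *	u64_stats_update_begin(&tstats->syncp);
 *	u64_stats_inc(&tstats->rx_packets);
 *	u64_stats_add(&tstats->rx_bytes, skb->len);
 *	u64_stats_update_end(&tstats->syncp);
 *
 * and then either calls dev_fetch_sw_netstats() from its own
 * ->ndo_get_stats64() or points that op at dev_get_tstats64() (defined
 * below), which combines dev->stats with these per-cpu counters.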
10476 */ 10477 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10478 const struct pcpu_sw_netstats __percpu *netstats) 10479 { 10480 int cpu; 10481 10482 for_each_possible_cpu(cpu) { 10483 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 10484 const struct pcpu_sw_netstats *stats; 10485 unsigned int start; 10486 10487 stats = per_cpu_ptr(netstats, cpu); 10488 do { 10489 start = u64_stats_fetch_begin(&stats->syncp); 10490 rx_packets = u64_stats_read(&stats->rx_packets); 10491 rx_bytes = u64_stats_read(&stats->rx_bytes); 10492 tx_packets = u64_stats_read(&stats->tx_packets); 10493 tx_bytes = u64_stats_read(&stats->tx_bytes); 10494 } while (u64_stats_fetch_retry(&stats->syncp, start)); 10495 10496 s->rx_packets += rx_packets; 10497 s->rx_bytes += rx_bytes; 10498 s->tx_packets += tx_packets; 10499 s->tx_bytes += tx_bytes; 10500 } 10501 } 10502 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10503 10504 /** 10505 * dev_get_tstats64 - ndo_get_stats64 implementation 10506 * @dev: device to get statistics from 10507 * @s: place to store stats 10508 * 10509 * Populate @s from dev->stats and dev->tstats. Can be used as 10510 * ndo_get_stats64() callback. 10511 */ 10512 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10513 { 10514 netdev_stats_to_stats64(s, &dev->stats); 10515 dev_fetch_sw_netstats(s, dev->tstats); 10516 } 10517 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10518 10519 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10520 { 10521 struct netdev_queue *queue = dev_ingress_queue(dev); 10522 10523 #ifdef CONFIG_NET_CLS_ACT 10524 if (queue) 10525 return queue; 10526 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10527 if (!queue) 10528 return NULL; 10529 netdev_init_one_queue(dev, queue, NULL); 10530 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10531 queue->qdisc_sleeping = &noop_qdisc; 10532 rcu_assign_pointer(dev->ingress_queue, queue); 10533 #endif 10534 return queue; 10535 } 10536 10537 static const struct ethtool_ops default_ethtool_ops; 10538 10539 void netdev_set_default_ethtool_ops(struct net_device *dev, 10540 const struct ethtool_ops *ops) 10541 { 10542 if (dev->ethtool_ops == &default_ethtool_ops) 10543 dev->ethtool_ops = ops; 10544 } 10545 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10546 10547 /** 10548 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default 10549 * @dev: netdev to enable the IRQ coalescing on 10550 * 10551 * Sets a conservative default for SW IRQ coalescing. Users can use 10552 * sysfs attributes to override the default values. 10553 */ 10554 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) 10555 { 10556 WARN_ON(dev->reg_state == NETREG_REGISTERED); 10557 10558 dev->gro_flush_timeout = 20000; 10559 dev->napi_defer_hard_irqs = 1; 10560 } 10561 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); 10562 10563 void netdev_freemem(struct net_device *dev) 10564 { 10565 char *addr = (char *)dev - dev->padded; 10566 10567 kvfree(addr); 10568 } 10569 10570 /** 10571 * alloc_netdev_mqs - allocate network device 10572 * @sizeof_priv: size of private data to allocate space for 10573 * @name: device name format string 10574 * @name_assign_type: origin of device name 10575 * @setup: callback to initialize device 10576 * @txqs: the number of TX subqueues to allocate 10577 * @rxqs: the number of RX subqueues to allocate 10578 * 10579 * Allocates a struct net_device with private data area for driver use 10580 * and performs basic initialization. 
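 *
 * (For instance - purely illustrative - an Ethernet driver wanting four
 * TX and four RX queues could call:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct hypo_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 4, 4);
 *
 * which is what the alloc_etherdev_mqs() convenience macro expands to.)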
Also allocates subqueue structs 10581 * for each queue on the device. 10582 */ 10583 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10584 unsigned char name_assign_type, 10585 void (*setup)(struct net_device *), 10586 unsigned int txqs, unsigned int rxqs) 10587 { 10588 struct net_device *dev; 10589 unsigned int alloc_size; 10590 struct net_device *p; 10591 10592 BUG_ON(strlen(name) >= sizeof(dev->name)); 10593 10594 if (txqs < 1) { 10595 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10596 return NULL; 10597 } 10598 10599 if (rxqs < 1) { 10600 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10601 return NULL; 10602 } 10603 10604 alloc_size = sizeof(struct net_device); 10605 if (sizeof_priv) { 10606 /* ensure 32-byte alignment of private area */ 10607 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10608 alloc_size += sizeof_priv; 10609 } 10610 /* ensure 32-byte alignment of whole construct */ 10611 alloc_size += NETDEV_ALIGN - 1; 10612 10613 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10614 if (!p) 10615 return NULL; 10616 10617 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10618 dev->padded = (char *)dev - (char *)p; 10619 10620 ref_tracker_dir_init(&dev->refcnt_tracker, 128); 10621 #ifdef CONFIG_PCPU_DEV_REFCNT 10622 dev->pcpu_refcnt = alloc_percpu(int); 10623 if (!dev->pcpu_refcnt) 10624 goto free_dev; 10625 __dev_hold(dev); 10626 #else 10627 refcount_set(&dev->dev_refcnt, 1); 10628 #endif 10629 10630 if (dev_addr_init(dev)) 10631 goto free_pcpu; 10632 10633 dev_mc_init(dev); 10634 dev_uc_init(dev); 10635 10636 dev_net_set(dev, &init_net); 10637 10638 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; 10639 dev->gso_max_segs = GSO_MAX_SEGS; 10640 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; 10641 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; 10642 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; 10643 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; 10644 dev->tso_max_segs = TSO_MAX_SEGS; 10645 dev->upper_level = 1; 10646 dev->lower_level = 1; 10647 #ifdef CONFIG_LOCKDEP 10648 dev->nested_level = 0; 10649 INIT_LIST_HEAD(&dev->unlink_list); 10650 #endif 10651 10652 INIT_LIST_HEAD(&dev->napi_list); 10653 INIT_LIST_HEAD(&dev->unreg_list); 10654 INIT_LIST_HEAD(&dev->close_list); 10655 INIT_LIST_HEAD(&dev->link_watch_list); 10656 INIT_LIST_HEAD(&dev->adj_list.upper); 10657 INIT_LIST_HEAD(&dev->adj_list.lower); 10658 INIT_LIST_HEAD(&dev->ptype_all); 10659 INIT_LIST_HEAD(&dev->ptype_specific); 10660 INIT_LIST_HEAD(&dev->net_notifier_list); 10661 #ifdef CONFIG_NET_SCHED 10662 hash_init(dev->qdisc_hash); 10663 #endif 10664 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10665 setup(dev); 10666 10667 if (!dev->tx_queue_len) { 10668 dev->priv_flags |= IFF_NO_QUEUE; 10669 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10670 } 10671 10672 dev->num_tx_queues = txqs; 10673 dev->real_num_tx_queues = txqs; 10674 if (netif_alloc_netdev_queues(dev)) 10675 goto free_all; 10676 10677 dev->num_rx_queues = rxqs; 10678 dev->real_num_rx_queues = rxqs; 10679 if (netif_alloc_rx_queues(dev)) 10680 goto free_all; 10681 10682 strcpy(dev->name, name); 10683 dev->name_assign_type = name_assign_type; 10684 dev->group = INIT_NETDEV_GROUP; 10685 if (!dev->ethtool_ops) 10686 dev->ethtool_ops = &default_ethtool_ops; 10687 10688 nf_hook_netdev_init(dev); 10689 10690 return dev; 10691 10692 free_all: 10693 free_netdev(dev); 10694 return NULL; 10695 10696 free_pcpu: 10697 #ifdef CONFIG_PCPU_DEV_REFCNT 10698 free_percpu(dev->pcpu_refcnt); 10699 free_dev: 
10700 #endif 10701 netdev_freemem(dev); 10702 return NULL; 10703 } 10704 EXPORT_SYMBOL(alloc_netdev_mqs); 10705 10706 /** 10707 * free_netdev - free network device 10708 * @dev: device 10709 * 10710 * This function does the last stage of destroying an allocated device 10711 * interface. The reference to the device object is released. If this 10712 * is the last reference then it will be freed.Must be called in process 10713 * context. 10714 */ 10715 void free_netdev(struct net_device *dev) 10716 { 10717 struct napi_struct *p, *n; 10718 10719 might_sleep(); 10720 10721 /* When called immediately after register_netdevice() failed the unwind 10722 * handling may still be dismantling the device. Handle that case by 10723 * deferring the free. 10724 */ 10725 if (dev->reg_state == NETREG_UNREGISTERING) { 10726 ASSERT_RTNL(); 10727 dev->needs_free_netdev = true; 10728 return; 10729 } 10730 10731 netif_free_tx_queues(dev); 10732 netif_free_rx_queues(dev); 10733 10734 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 10735 10736 /* Flush device addresses */ 10737 dev_addr_flush(dev); 10738 10739 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 10740 netif_napi_del(p); 10741 10742 ref_tracker_dir_exit(&dev->refcnt_tracker); 10743 #ifdef CONFIG_PCPU_DEV_REFCNT 10744 free_percpu(dev->pcpu_refcnt); 10745 dev->pcpu_refcnt = NULL; 10746 #endif 10747 free_percpu(dev->core_stats); 10748 dev->core_stats = NULL; 10749 free_percpu(dev->xdp_bulkq); 10750 dev->xdp_bulkq = NULL; 10751 10752 /* Compatibility with error handling in drivers */ 10753 if (dev->reg_state == NETREG_UNINITIALIZED) { 10754 netdev_freemem(dev); 10755 return; 10756 } 10757 10758 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 10759 dev->reg_state = NETREG_RELEASED; 10760 10761 /* will free via device release */ 10762 put_device(&dev->dev); 10763 } 10764 EXPORT_SYMBOL(free_netdev); 10765 10766 /** 10767 * synchronize_net - Synchronize with packet receive processing 10768 * 10769 * Wait for packets currently being received to be done. 10770 * Does not block later packets from starting. 10771 */ 10772 void synchronize_net(void) 10773 { 10774 might_sleep(); 10775 if (rtnl_is_locked()) 10776 synchronize_rcu_expedited(); 10777 else 10778 synchronize_rcu(); 10779 } 10780 EXPORT_SYMBOL(synchronize_net); 10781 10782 /** 10783 * unregister_netdevice_queue - remove device from the kernel 10784 * @dev: device 10785 * @head: list 10786 * 10787 * This function shuts down a device interface and removes it 10788 * from the kernel tables. 10789 * If head not NULL, device is queued to be unregistered later. 10790 * 10791 * Callers must hold the rtnl semaphore. You may want 10792 * unregister_netdev() instead of this. 
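 *
 * A hedged sketch of the batched form (dev_a/dev_b are placeholders):
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev_a, &kill_list);
 *	unregister_netdevice_queue(dev_b, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 *
 * Queueing several devices on one list lets unregister_netdevice_many()
 * share the close/synchronize_net()/flush work across the whole batch,
 * as default_device_exit_batch() does below.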
10793 */ 10794 10795 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 10796 { 10797 ASSERT_RTNL(); 10798 10799 if (head) { 10800 list_move_tail(&dev->unreg_list, head); 10801 } else { 10802 LIST_HEAD(single); 10803 10804 list_add(&dev->unreg_list, &single); 10805 unregister_netdevice_many(&single); 10806 } 10807 } 10808 EXPORT_SYMBOL(unregister_netdevice_queue); 10809 10810 void unregister_netdevice_many_notify(struct list_head *head, 10811 u32 portid, const struct nlmsghdr *nlh) 10812 { 10813 struct net_device *dev, *tmp; 10814 LIST_HEAD(close_head); 10815 10816 BUG_ON(dev_boot_phase); 10817 ASSERT_RTNL(); 10818 10819 if (list_empty(head)) 10820 return; 10821 10822 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 10823 /* Some devices call without registering 10824 * for initialization unwind. Remove those 10825 * devices and proceed with the remaining. 10826 */ 10827 if (dev->reg_state == NETREG_UNINITIALIZED) { 10828 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 10829 dev->name, dev); 10830 10831 WARN_ON(1); 10832 list_del(&dev->unreg_list); 10833 continue; 10834 } 10835 dev->dismantle = true; 10836 BUG_ON(dev->reg_state != NETREG_REGISTERED); 10837 } 10838 10839 /* If device is running, close it first. */ 10840 list_for_each_entry(dev, head, unreg_list) 10841 list_add_tail(&dev->close_list, &close_head); 10842 dev_close_many(&close_head, true); 10843 10844 list_for_each_entry(dev, head, unreg_list) { 10845 /* And unlink it from device chain. */ 10846 write_lock(&dev_base_lock); 10847 unlist_netdevice(dev, false); 10848 dev->reg_state = NETREG_UNREGISTERING; 10849 write_unlock(&dev_base_lock); 10850 } 10851 flush_all_backlogs(); 10852 10853 synchronize_net(); 10854 10855 list_for_each_entry(dev, head, unreg_list) { 10856 struct sk_buff *skb = NULL; 10857 10858 /* Shutdown queueing discipline. */ 10859 dev_shutdown(dev); 10860 10861 dev_xdp_uninstall(dev); 10862 bpf_dev_bound_netdev_unregister(dev); 10863 10864 netdev_offload_xstats_disable_all(dev); 10865 10866 /* Notify protocols, that we are about to destroy 10867 * this device. They should clean all the things. 10868 */ 10869 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10870 10871 if (!dev->rtnl_link_ops || 10872 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10873 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 10874 GFP_KERNEL, NULL, 0, 10875 portid, nlmsg_seq(nlh)); 10876 10877 /* 10878 * Flush the unicast and multicast chains 10879 */ 10880 dev_uc_flush(dev); 10881 dev_mc_flush(dev); 10882 10883 netdev_name_node_alt_flush(dev); 10884 netdev_name_node_free(dev->name_node); 10885 10886 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 10887 10888 if (dev->netdev_ops->ndo_uninit) 10889 dev->netdev_ops->ndo_uninit(dev); 10890 10891 if (skb) 10892 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); 10893 10894 /* Notifier chain MUST detach us all upper devices. 
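 *
 * That is, a stacking driver (bonding/team/vlan-style) is expected to
 * have reacted to the NETDEV_UNREGISTER event sent above with something
 * like this hypothetical notifier fragment:
 *
 *	case NETDEV_UNREGISTER:
 *		if (hypo_is_our_lower(dev))
 *			netdev_upper_dev_unlink(dev, hypo_get_upper(dev));
 *		break;
 *
 * so that no adjacency is left by the time the WARN_ONs below run.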
*/
10895 WARN_ON(netdev_has_any_upper_dev(dev));
10896 WARN_ON(netdev_has_any_lower_dev(dev));
10897
10898 /* Remove entries from kobject tree */
10899 netdev_unregister_kobject(dev);
10900 #ifdef CONFIG_XPS
10901 /* Remove XPS queueing entries */
10902 netif_reset_xps_queues_gt(dev, 0);
10903 #endif
10904 }
10905
10906 synchronize_net();
10907
10908 list_for_each_entry(dev, head, unreg_list) {
10909 netdev_put(dev, &dev->dev_registered_tracker);
10910 net_set_todo(dev);
10911 }
10912
10913 list_del(head);
10914 }
10915
10916 /**
10917 * unregister_netdevice_many - unregister many devices
10918 * @head: list of devices
10919 *
10920 * Note: As most callers use a stack allocated list_head,
10921 * we force a list_del() to make sure the stack won't be corrupted later.
10922 */
10923 void unregister_netdevice_many(struct list_head *head)
10924 {
10925 unregister_netdevice_many_notify(head, 0, NULL);
10926 }
10927 EXPORT_SYMBOL(unregister_netdevice_many);
10928
10929 /**
10930 * unregister_netdev - remove device from the kernel
10931 * @dev: device
10932 *
10933 * This function shuts down a device interface and removes it
10934 * from the kernel tables.
10935 *
10936 * This is just a wrapper for unregister_netdevice that takes
10937 * the rtnl semaphore. In general you want to use this and not
10938 * unregister_netdevice.
10939 */
10940 void unregister_netdev(struct net_device *dev)
10941 {
10942 rtnl_lock();
10943 unregister_netdevice(dev);
10944 rtnl_unlock();
10945 }
10946 EXPORT_SYMBOL(unregister_netdev);
10947
10948 /**
10949 * __dev_change_net_namespace - move device to a different network namespace
10950 * @dev: device
10951 * @net: network namespace
10952 * @pat: If not NULL name pattern to try if the current device name
10953 * is already taken in the destination network namespace.
10954 * @new_ifindex: If not zero, specifies device index in the target
10955 * namespace.
10956 *
10957 * This function shuts down a device interface and moves it
10958 * to a new network namespace. On success 0 is returned, on
10959 * a failure a negative errno code is returned.
10960 *
10961 * Callers must hold the rtnl semaphore.
10962 */
10963
10964 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10965 const char *pat, int new_ifindex)
10966 {
10967 struct net *net_old = dev_net(dev);
10968 int err, new_nsid;
10969
10970 ASSERT_RTNL();
10971
10972 /* Don't allow namespace local devices to be moved. */
10973 err = -EINVAL;
10974 if (dev->features & NETIF_F_NETNS_LOCAL)
10975 goto out;
10976
10977 /* Ensure the device has been registered */
10978 if (dev->reg_state != NETREG_REGISTERED)
10979 goto out;
10980
10981 /* Get out if there is nothing to do */
10982 err = 0;
10983 if (net_eq(net_old, net))
10984 goto out;
10985
10986 /* Pick the destination device name, and ensure
10987 * we can use it in the destination network namespace.
10988 */
10989 err = -EEXIST;
10990 if (netdev_name_in_use(net, dev->name)) {
10991 /* We get here if we can't use the current device name */
10992 if (!pat)
10993 goto out;
10994 err = dev_get_valid_name(net, dev, pat);
10995 if (err < 0)
10996 goto out;
10997 }
10998
10999 /* Check that new_ifindex isn't used yet. */
11000 err = -EBUSY;
11001 if (new_ifindex && __dev_get_by_index(net, new_ifindex))
11002 goto out;
11003
11004 /*
11005 * And now a mini version of register_netdevice and unregister_netdevice.
11006 */
11007
11008 /* If device is running, close it first.
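 *
 * (From here on this is the advertised mini register/unregister cycle:
 * dev_close(), unlist, NETDEV_UNREGISTER notification, switch of the
 * namespace and ifindex, relist, NETDEV_REGISTER notification. A hedged
 * caller-side sketch, with "newnet" obtained elsewhere, for instance via
 * get_net_ns_by_fd():
 *
 *	rtnl_lock();
 *	err = __dev_change_net_namespace(dev, newnet, "eth%d", 0);
 *	rtnl_unlock();
 *
 * Most in-kernel callers use dev_change_net_namespace(), which is this
 * function with new_ifindex == 0.)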
*/ 11009 dev_close(dev); 11010 11011 /* And unlink it from device chain */ 11012 unlist_netdevice(dev, true); 11013 11014 synchronize_net(); 11015 11016 /* Shutdown queueing discipline. */ 11017 dev_shutdown(dev); 11018 11019 /* Notify protocols, that we are about to destroy 11020 * this device. They should clean all the things. 11021 * 11022 * Note that dev->reg_state stays at NETREG_REGISTERED. 11023 * This is wanted because this way 8021q and macvlan know 11024 * the device is just moving and can keep their slaves up. 11025 */ 11026 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11027 rcu_barrier(); 11028 11029 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11030 /* If there is an ifindex conflict assign a new one */ 11031 if (!new_ifindex) { 11032 if (__dev_get_by_index(net, dev->ifindex)) 11033 new_ifindex = dev_new_index(net); 11034 else 11035 new_ifindex = dev->ifindex; 11036 } 11037 11038 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11039 new_ifindex); 11040 11041 /* 11042 * Flush the unicast and multicast chains 11043 */ 11044 dev_uc_flush(dev); 11045 dev_mc_flush(dev); 11046 11047 /* Send a netdev-removed uevent to the old namespace */ 11048 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11049 netdev_adjacent_del_links(dev); 11050 11051 /* Move per-net netdevice notifiers that are following the netdevice */ 11052 move_netdevice_notifiers_dev_net(dev, net); 11053 11054 /* Actually switch the network namespace */ 11055 dev_net_set(dev, net); 11056 dev->ifindex = new_ifindex; 11057 11058 /* Send a netdev-add uevent to the new namespace */ 11059 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11060 netdev_adjacent_add_links(dev); 11061 11062 /* Fixup kobjects */ 11063 err = device_rename(&dev->dev, dev->name); 11064 WARN_ON(err); 11065 11066 /* Adapt owner in case owning user namespace of target network 11067 * namespace is different from the original one. 11068 */ 11069 err = netdev_change_owner(dev, net_old, net); 11070 WARN_ON(err); 11071 11072 /* Add the device back in the hashes */ 11073 list_netdevice(dev); 11074 11075 /* Notify protocols, that a new device appeared. */ 11076 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11077 11078 /* 11079 * Prevent userspace races by waiting until the network 11080 * device is fully setup before sending notifications. 11081 */ 11082 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 11083 11084 synchronize_net(); 11085 err = 0; 11086 out: 11087 return err; 11088 } 11089 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 11090 11091 static int dev_cpu_dead(unsigned int oldcpu) 11092 { 11093 struct sk_buff **list_skb; 11094 struct sk_buff *skb; 11095 unsigned int cpu; 11096 struct softnet_data *sd, *oldsd, *remsd = NULL; 11097 11098 local_irq_disable(); 11099 cpu = smp_processor_id(); 11100 sd = &per_cpu(softnet_data, cpu); 11101 oldsd = &per_cpu(softnet_data, oldcpu); 11102 11103 /* Find end of our completion_queue. */ 11104 list_skb = &sd->completion_queue; 11105 while (*list_skb) 11106 list_skb = &(*list_skb)->next; 11107 /* Append completion queue from offline CPU. */ 11108 *list_skb = oldsd->completion_queue; 11109 oldsd->completion_queue = NULL; 11110 11111 /* Append output queue from offline CPU. 
*/ 11112 if (oldsd->output_queue) { 11113 *sd->output_queue_tailp = oldsd->output_queue; 11114 sd->output_queue_tailp = oldsd->output_queue_tailp; 11115 oldsd->output_queue = NULL; 11116 oldsd->output_queue_tailp = &oldsd->output_queue; 11117 } 11118 /* Append NAPI poll list from offline CPU, with one exception : 11119 * process_backlog() must be called by cpu owning percpu backlog. 11120 * We properly handle process_queue & input_pkt_queue later. 11121 */ 11122 while (!list_empty(&oldsd->poll_list)) { 11123 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11124 struct napi_struct, 11125 poll_list); 11126 11127 list_del_init(&napi->poll_list); 11128 if (napi->poll == process_backlog) 11129 napi->state = 0; 11130 else 11131 ____napi_schedule(sd, napi); 11132 } 11133 11134 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11135 local_irq_enable(); 11136 11137 #ifdef CONFIG_RPS 11138 remsd = oldsd->rps_ipi_list; 11139 oldsd->rps_ipi_list = NULL; 11140 #endif 11141 /* send out pending IPI's on offline CPU */ 11142 net_rps_send_ipi(remsd); 11143 11144 /* Process offline CPU's input_pkt_queue */ 11145 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11146 netif_rx(skb); 11147 input_queue_head_incr(oldsd); 11148 } 11149 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11150 netif_rx(skb); 11151 input_queue_head_incr(oldsd); 11152 } 11153 11154 return 0; 11155 } 11156 11157 /** 11158 * netdev_increment_features - increment feature set by one 11159 * @all: current feature set 11160 * @one: new feature set 11161 * @mask: mask feature set 11162 * 11163 * Computes a new feature set after adding a device with feature set 11164 * @one to the master device with current feature set @all. Will not 11165 * enable anything that is off in @mask. Returns the new feature set. 11166 */ 11167 netdev_features_t netdev_increment_features(netdev_features_t all, 11168 netdev_features_t one, netdev_features_t mask) 11169 { 11170 if (mask & NETIF_F_HW_CSUM) 11171 mask |= NETIF_F_CSUM_MASK; 11172 mask |= NETIF_F_VLAN_CHALLENGED; 11173 11174 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11175 all &= one | ~NETIF_F_ALL_FOR_ALL; 11176 11177 /* If one device supports hw checksumming, set for all. 
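 *
 * (More generally, a master driver - bonding/team style - would fold all
 * of its lower devices into one feature set with a loop along these
 * hypothetical lines, seeding "features" with the master's own base set
 * and using a driver-specific "mask":
 *
 *	list_for_each_entry(slave, &priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 *
 * which is the intended usage pattern of this helper.)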
*/ 11178 if (all & NETIF_F_HW_CSUM) 11179 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11180 11181 return all; 11182 } 11183 EXPORT_SYMBOL(netdev_increment_features); 11184 11185 static struct hlist_head * __net_init netdev_create_hash(void) 11186 { 11187 int i; 11188 struct hlist_head *hash; 11189 11190 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11191 if (hash != NULL) 11192 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11193 INIT_HLIST_HEAD(&hash[i]); 11194 11195 return hash; 11196 } 11197 11198 /* Initialize per network namespace state */ 11199 static int __net_init netdev_init(struct net *net) 11200 { 11201 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11202 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11203 11204 INIT_LIST_HEAD(&net->dev_base_head); 11205 11206 net->dev_name_head = netdev_create_hash(); 11207 if (net->dev_name_head == NULL) 11208 goto err_name; 11209 11210 net->dev_index_head = netdev_create_hash(); 11211 if (net->dev_index_head == NULL) 11212 goto err_idx; 11213 11214 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11215 11216 return 0; 11217 11218 err_idx: 11219 kfree(net->dev_name_head); 11220 err_name: 11221 return -ENOMEM; 11222 } 11223 11224 /** 11225 * netdev_drivername - network driver for the device 11226 * @dev: network device 11227 * 11228 * Determine network driver for device. 11229 */ 11230 const char *netdev_drivername(const struct net_device *dev) 11231 { 11232 const struct device_driver *driver; 11233 const struct device *parent; 11234 const char *empty = ""; 11235 11236 parent = dev->dev.parent; 11237 if (!parent) 11238 return empty; 11239 11240 driver = parent->driver; 11241 if (driver && driver->name) 11242 return driver->name; 11243 return empty; 11244 } 11245 11246 static void __netdev_printk(const char *level, const struct net_device *dev, 11247 struct va_format *vaf) 11248 { 11249 if (dev && dev->dev.parent) { 11250 dev_printk_emit(level[1] - '0', 11251 dev->dev.parent, 11252 "%s %s %s%s: %pV", 11253 dev_driver_string(dev->dev.parent), 11254 dev_name(dev->dev.parent), 11255 netdev_name(dev), netdev_reg_state(dev), 11256 vaf); 11257 } else if (dev) { 11258 printk("%s%s%s: %pV", 11259 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11260 } else { 11261 printk("%s(NULL net_device): %pV", level, vaf); 11262 } 11263 } 11264 11265 void netdev_printk(const char *level, const struct net_device *dev, 11266 const char *format, ...) 11267 { 11268 struct va_format vaf; 11269 va_list args; 11270 11271 va_start(args, format); 11272 11273 vaf.fmt = format; 11274 vaf.va = &args; 11275 11276 __netdev_printk(level, dev, &vaf); 11277 11278 va_end(args); 11279 } 11280 EXPORT_SYMBOL(netdev_printk); 11281 11282 #define define_netdev_printk_level(func, level) \ 11283 void func(const struct net_device *dev, const char *fmt, ...) 
\ 11284 { \ 11285 struct va_format vaf; \ 11286 va_list args; \ 11287 \ 11288 va_start(args, fmt); \ 11289 \ 11290 vaf.fmt = fmt; \ 11291 vaf.va = &args; \ 11292 \ 11293 __netdev_printk(level, dev, &vaf); \ 11294 \ 11295 va_end(args); \ 11296 } \ 11297 EXPORT_SYMBOL(func); 11298 11299 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11300 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11301 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11302 define_netdev_printk_level(netdev_err, KERN_ERR); 11303 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11304 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11305 define_netdev_printk_level(netdev_info, KERN_INFO); 11306 11307 static void __net_exit netdev_exit(struct net *net) 11308 { 11309 kfree(net->dev_name_head); 11310 kfree(net->dev_index_head); 11311 if (net != &init_net) 11312 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 11313 } 11314 11315 static struct pernet_operations __net_initdata netdev_net_ops = { 11316 .init = netdev_init, 11317 .exit = netdev_exit, 11318 }; 11319 11320 static void __net_exit default_device_exit_net(struct net *net) 11321 { 11322 struct net_device *dev, *aux; 11323 /* 11324 * Push all migratable network devices back to the 11325 * initial network namespace 11326 */ 11327 ASSERT_RTNL(); 11328 for_each_netdev_safe(net, dev, aux) { 11329 int err; 11330 char fb_name[IFNAMSIZ]; 11331 11332 /* Ignore unmoveable devices (i.e. loopback) */ 11333 if (dev->features & NETIF_F_NETNS_LOCAL) 11334 continue; 11335 11336 /* Leave virtual devices for the generic cleanup */ 11337 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 11338 continue; 11339 11340 /* Push remaining network devices to init_net */ 11341 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11342 if (netdev_name_in_use(&init_net, fb_name)) 11343 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11344 err = dev_change_net_namespace(dev, &init_net, fb_name); 11345 if (err) { 11346 pr_emerg("%s: failed to move %s to init_net: %d\n", 11347 __func__, dev->name, err); 11348 BUG(); 11349 } 11350 } 11351 } 11352 11353 static void __net_exit default_device_exit_batch(struct list_head *net_list) 11354 { 11355 /* At exit all network devices most be removed from a network 11356 * namespace. Do this in the reverse order of registration. 11357 * Do this across as many network namespaces as possible to 11358 * improve batching efficiency. 11359 */ 11360 struct net_device *dev; 11361 struct net *net; 11362 LIST_HEAD(dev_kill_list); 11363 11364 rtnl_lock(); 11365 list_for_each_entry(net, net_list, exit_list) { 11366 default_device_exit_net(net); 11367 cond_resched(); 11368 } 11369 11370 list_for_each_entry(net, net_list, exit_list) { 11371 for_each_netdev_reverse(net, dev) { 11372 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 11373 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 11374 else 11375 unregister_netdevice_queue(dev, &dev_kill_list); 11376 } 11377 } 11378 unregister_netdevice_many(&dev_kill_list); 11379 rtnl_unlock(); 11380 } 11381 11382 static struct pernet_operations __net_initdata default_device_ops = { 11383 .exit_batch = default_device_exit_batch, 11384 }; 11385 11386 /* 11387 * Initialize the DEV module. At boot time this walks the device list and 11388 * unhooks any devices that fail to initialise (normally hardware not 11389 * present) and leaves us with a valid list of present and active devices. 
11390 * 11391 */ 11392 11393 /* 11394 * This is called single threaded during boot, so no need 11395 * to take the rtnl semaphore. 11396 */ 11397 static int __init net_dev_init(void) 11398 { 11399 int i, rc = -ENOMEM; 11400 11401 BUG_ON(!dev_boot_phase); 11402 11403 if (dev_proc_init()) 11404 goto out; 11405 11406 if (netdev_kobject_init()) 11407 goto out; 11408 11409 INIT_LIST_HEAD(&ptype_all); 11410 for (i = 0; i < PTYPE_HASH_SIZE; i++) 11411 INIT_LIST_HEAD(&ptype_base[i]); 11412 11413 if (register_pernet_subsys(&netdev_net_ops)) 11414 goto out; 11415 11416 /* 11417 * Initialise the packet receive queues. 11418 */ 11419 11420 for_each_possible_cpu(i) { 11421 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 11422 struct softnet_data *sd = &per_cpu(softnet_data, i); 11423 11424 INIT_WORK(flush, flush_backlog); 11425 11426 skb_queue_head_init(&sd->input_pkt_queue); 11427 skb_queue_head_init(&sd->process_queue); 11428 #ifdef CONFIG_XFRM_OFFLOAD 11429 skb_queue_head_init(&sd->xfrm_backlog); 11430 #endif 11431 INIT_LIST_HEAD(&sd->poll_list); 11432 sd->output_queue_tailp = &sd->output_queue; 11433 #ifdef CONFIG_RPS 11434 INIT_CSD(&sd->csd, rps_trigger_softirq, sd); 11435 sd->cpu = i; 11436 #endif 11437 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); 11438 spin_lock_init(&sd->defer_lock); 11439 11440 init_gro_hash(&sd->backlog); 11441 sd->backlog.poll = process_backlog; 11442 sd->backlog.weight = weight_p; 11443 } 11444 11445 dev_boot_phase = 0; 11446 11447 /* The loopback device is special if any other network devices 11448 * is present in a network namespace the loopback device must 11449 * be present. Since we now dynamically allocate and free the 11450 * loopback device ensure this invariant is maintained by 11451 * keeping the loopback device as the first device on the 11452 * list of network devices. Ensuring the loopback devices 11453 * is the first device that appears and the last network device 11454 * that disappears. 11455 */ 11456 if (register_pernet_device(&loopback_net_ops)) 11457 goto out; 11458 11459 if (register_pernet_device(&default_device_ops)) 11460 goto out; 11461 11462 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 11463 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 11464 11465 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 11466 NULL, dev_cpu_dead); 11467 WARN_ON(rc < 0); 11468 rc = 0; 11469 out: 11470 return rc; 11471 } 11472 11473 subsys_initcall(net_dev_init); 11474
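/*
 * Illustrative footnote (not part of dev.c proper): the contract documented
 * at netdev_wait_allrefs_any() above - any subsystem that keeps a long-lived
 * reference on a net_device should register a netdevice notifier and drop
 * that reference on NETDEV_UNREGISTER. A hedged sketch, with all "hypo_*"
 * names made up and the tracker normally living in per-device state rather
 * than in a global:
 *
 *	static netdevice_tracker hypo_tracker;
 *
 *	static int hypo_netdev_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:
 *			netdev_hold(dev, &hypo_tracker, GFP_KERNEL);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			netdev_put(dev, &hypo_tracker);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block hypo_nb = {
 *		.notifier_call = hypo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&hypo_nb);
 *
 * Forgetting the NETDEV_UNREGISTER half is exactly what makes
 * unregister_netdevice() spin in netdev_wait_allrefs_any() and emit the
 * "waiting for %s to become free. Usage count = %d" warning seen above.
 */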