1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NET3 Protocol independent device support routines. 4 * 5 * Derived from the non IP parts of dev.c 1.0.19 6 * Authors: Ross Biro 7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 8 * Mark Evans, <evansmp@uhura.aston.ac.uk> 9 * 10 * Additional Authors: 11 * Florian la Roche <rzsfl@rz.uni-sb.de> 12 * Alan Cox <gw4pts@gw4pts.ampr.org> 13 * David Hinds <dahinds@users.sourceforge.net> 14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 15 * Adam Sulmicki <adam@cfar.umd.edu> 16 * Pekka Riikonen <priikone@poesidon.pspt.fi> 17 * 18 * Changes: 19 * D.J. Barrow : Fixed bug where dev->refcnt gets set 20 * to 2 if register_netdev gets called 21 * before net_dev_init & also removed a 22 * few lines of code in the process. 23 * Alan Cox : device private ioctl copies fields back. 24 * Alan Cox : Transmit queue code does relevant 25 * stunts to keep the queue safe. 26 * Alan Cox : Fixed double lock. 27 * Alan Cox : Fixed promisc NULL pointer trap 28 * ???????? : Support the full private ioctl range 29 * Alan Cox : Moved ioctl permission check into 30 * drivers 31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI 32 * Alan Cox : 100 backlog just doesn't cut it when 33 * you start doing multicast video 8) 34 * Alan Cox : Rewrote net_bh and list manager. 35 * Alan Cox : Fix ETH_P_ALL echoback lengths. 36 * Alan Cox : Took out transmit every packet pass 37 * Saved a few bytes in the ioctl handler 38 * Alan Cox : Network driver sets packet type before 39 * calling netif_rx. Saves a function 40 * call a packet. 41 * Alan Cox : Hashed net_bh() 42 * Richard Kooijman: Timestamp fixes. 43 * Alan Cox : Wrong field in SIOCGIFDSTADDR 44 * Alan Cox : Device lock protection. 45 * Alan Cox : Fixed nasty side effect of device close 46 * changes. 47 * Rudi Cilibrasi : Pass the right thing to 48 * set_mac_address() 49 * Dave Miller : 32bit quantity for the device lock to 50 * make it work out on a Sparc. 51 * Bjorn Ekwall : Added KERNELD hack. 52 * Alan Cox : Cleaned up the backlog initialise. 53 * Craig Metz : SIOCGIFCONF fix if space for under 54 * 1 device. 55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there 56 * is no device open function. 57 * Andi Kleen : Fix error reporting for SIOCGIFCONF 58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF 59 * Cyrus Durgin : Cleaned for KMOD 60 * Adam Sulmicki : Bug Fix : Network Device Unload 61 * A network device unload needs to purge 62 * the backlog queue. 
63 * Paul Rusty Russell : SIOCSIFNAME 64 * Pekka Riikonen : Netdev boot-time settings code 65 * Andrew Morton : Make unregister_netdevice wait 66 * indefinitely on dev->refcnt 67 * J Hadi Salim : - Backlog queue sampling 68 * - netif_rx() feedback 69 */ 70 71 #include <linux/uaccess.h> 72 #include <linux/bitops.h> 73 #include <linux/capability.h> 74 #include <linux/cpu.h> 75 #include <linux/types.h> 76 #include <linux/kernel.h> 77 #include <linux/hash.h> 78 #include <linux/slab.h> 79 #include <linux/sched.h> 80 #include <linux/sched/mm.h> 81 #include <linux/mutex.h> 82 #include <linux/rwsem.h> 83 #include <linux/string.h> 84 #include <linux/mm.h> 85 #include <linux/socket.h> 86 #include <linux/sockios.h> 87 #include <linux/errno.h> 88 #include <linux/interrupt.h> 89 #include <linux/if_ether.h> 90 #include <linux/netdevice.h> 91 #include <linux/etherdevice.h> 92 #include <linux/ethtool.h> 93 #include <linux/skbuff.h> 94 #include <linux/kthread.h> 95 #include <linux/bpf.h> 96 #include <linux/bpf_trace.h> 97 #include <net/net_namespace.h> 98 #include <net/sock.h> 99 #include <net/busy_poll.h> 100 #include <linux/rtnetlink.h> 101 #include <linux/stat.h> 102 #include <net/dsa.h> 103 #include <net/dst.h> 104 #include <net/dst_metadata.h> 105 #include <net/gro.h> 106 #include <net/pkt_sched.h> 107 #include <net/pkt_cls.h> 108 #include <net/checksum.h> 109 #include <net/xfrm.h> 110 #include <linux/highmem.h> 111 #include <linux/init.h> 112 #include <linux/module.h> 113 #include <linux/netpoll.h> 114 #include <linux/rcupdate.h> 115 #include <linux/delay.h> 116 #include <net/iw_handler.h> 117 #include <asm/current.h> 118 #include <linux/audit.h> 119 #include <linux/dmaengine.h> 120 #include <linux/err.h> 121 #include <linux/ctype.h> 122 #include <linux/if_arp.h> 123 #include <linux/if_vlan.h> 124 #include <linux/ip.h> 125 #include <net/ip.h> 126 #include <net/mpls.h> 127 #include <linux/ipv6.h> 128 #include <linux/in.h> 129 #include <linux/jhash.h> 130 #include <linux/random.h> 131 #include <trace/events/napi.h> 132 #include <trace/events/net.h> 133 #include <trace/events/skb.h> 134 #include <trace/events/qdisc.h> 135 #include <linux/inetdevice.h> 136 #include <linux/cpu_rmap.h> 137 #include <linux/static_key.h> 138 #include <linux/hashtable.h> 139 #include <linux/vmalloc.h> 140 #include <linux/if_macvlan.h> 141 #include <linux/errqueue.h> 142 #include <linux/hrtimer.h> 143 #include <linux/netfilter_netdev.h> 144 #include <linux/crash_dump.h> 145 #include <linux/sctp.h> 146 #include <net/udp_tunnel.h> 147 #include <linux/net_namespace.h> 148 #include <linux/indirect_call_wrapper.h> 149 #include <net/devlink.h> 150 #include <linux/pm_runtime.h> 151 #include <linux/prandom.h> 152 #include <linux/once_lite.h> 153 154 #include "dev.h" 155 #include "net-sysfs.h" 156 157 158 static DEFINE_SPINLOCK(ptype_lock); 159 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 160 struct list_head ptype_all __read_mostly; /* Taps */ 161 162 static int netif_rx_internal(struct sk_buff *skb); 163 static int call_netdevice_notifiers_extack(unsigned long val, 164 struct net_device *dev, 165 struct netlink_ext_ack *extack); 166 static struct napi_struct *napi_by_id(unsigned int napi_id); 167 168 /* 169 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 170 * semaphore. 
171 * 172 * Pure readers hold dev_base_lock for reading, or rcu_read_lock() 173 * 174 * Writers must hold the rtnl semaphore while they loop through the 175 * dev_base_head list, and hold dev_base_lock for writing when they do the 176 * actual updates. This allows pure readers to access the list even 177 * while a writer is preparing to update it. 178 * 179 * To put it another way, dev_base_lock is held for writing only to 180 * protect against pure readers; the rtnl semaphore provides the 181 * protection against other writers. 182 * 183 * See, for example usages, register_netdevice() and 184 * unregister_netdevice(), which must be called with the rtnl 185 * semaphore held. 186 */ 187 DEFINE_RWLOCK(dev_base_lock); 188 EXPORT_SYMBOL(dev_base_lock); 189 190 static DEFINE_MUTEX(ifalias_mutex); 191 192 /* protects napi_hash addition/deletion and napi_gen_id */ 193 static DEFINE_SPINLOCK(napi_hash_lock); 194 195 static unsigned int napi_gen_id = NR_CPUS; 196 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 197 198 static DECLARE_RWSEM(devnet_rename_sem); 199 200 static inline void dev_base_seq_inc(struct net *net) 201 { 202 while (++net->dev_base_seq == 0) 203 ; 204 } 205 206 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 207 { 208 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 209 210 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 211 } 212 213 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 214 { 215 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 216 } 217 218 static inline void rps_lock_irqsave(struct softnet_data *sd, 219 unsigned long *flags) 220 { 221 if (IS_ENABLED(CONFIG_RPS)) 222 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags); 223 else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 224 local_irq_save(*flags); 225 } 226 227 static inline void rps_lock_irq_disable(struct softnet_data *sd) 228 { 229 if (IS_ENABLED(CONFIG_RPS)) 230 spin_lock_irq(&sd->input_pkt_queue.lock); 231 else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 232 local_irq_disable(); 233 } 234 235 static inline void rps_unlock_irq_restore(struct softnet_data *sd, 236 unsigned long *flags) 237 { 238 if (IS_ENABLED(CONFIG_RPS)) 239 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags); 240 else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 241 local_irq_restore(*flags); 242 } 243 244 static inline void rps_unlock_irq_enable(struct softnet_data *sd) 245 { 246 if (IS_ENABLED(CONFIG_RPS)) 247 spin_unlock_irq(&sd->input_pkt_queue.lock); 248 else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 249 local_irq_enable(); 250 } 251 252 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev, 253 const char *name) 254 { 255 struct netdev_name_node *name_node; 256 257 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL); 258 if (!name_node) 259 return NULL; 260 INIT_HLIST_NODE(&name_node->hlist); 261 name_node->dev = dev; 262 name_node->name = name; 263 return name_node; 264 } 265 266 static struct netdev_name_node * 267 netdev_name_node_head_alloc(struct net_device *dev) 268 { 269 struct netdev_name_node *name_node; 270 271 name_node = netdev_name_node_alloc(dev, dev->name); 272 if (!name_node) 273 return NULL; 274 INIT_LIST_HEAD(&name_node->list); 275 return name_node; 276 } 277 278 static void netdev_name_node_free(struct netdev_name_node *name_node) 279 { 280 kfree(name_node); 281 } 282 283 static void netdev_name_node_add(struct net *net, 284 struct netdev_name_node *name_node) 285 { 286 
hlist_add_head_rcu(&name_node->hlist, 287 dev_name_hash(net, name_node->name)); 288 } 289 290 static void netdev_name_node_del(struct netdev_name_node *name_node) 291 { 292 hlist_del_rcu(&name_node->hlist); 293 } 294 295 static struct netdev_name_node *netdev_name_node_lookup(struct net *net, 296 const char *name) 297 { 298 struct hlist_head *head = dev_name_hash(net, name); 299 struct netdev_name_node *name_node; 300 301 hlist_for_each_entry(name_node, head, hlist) 302 if (!strcmp(name_node->name, name)) 303 return name_node; 304 return NULL; 305 } 306 307 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net, 308 const char *name) 309 { 310 struct hlist_head *head = dev_name_hash(net, name); 311 struct netdev_name_node *name_node; 312 313 hlist_for_each_entry_rcu(name_node, head, hlist) 314 if (!strcmp(name_node->name, name)) 315 return name_node; 316 return NULL; 317 } 318 319 bool netdev_name_in_use(struct net *net, const char *name) 320 { 321 return netdev_name_node_lookup(net, name); 322 } 323 EXPORT_SYMBOL(netdev_name_in_use); 324 325 int netdev_name_node_alt_create(struct net_device *dev, const char *name) 326 { 327 struct netdev_name_node *name_node; 328 struct net *net = dev_net(dev); 329 330 name_node = netdev_name_node_lookup(net, name); 331 if (name_node) 332 return -EEXIST; 333 name_node = netdev_name_node_alloc(dev, name); 334 if (!name_node) 335 return -ENOMEM; 336 netdev_name_node_add(net, name_node); 337 /* The node that holds dev->name acts as a head of per-device list. */ 338 list_add_tail(&name_node->list, &dev->name_node->list); 339 340 return 0; 341 } 342 343 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node) 344 { 345 list_del(&name_node->list); 346 netdev_name_node_del(name_node); 347 kfree(name_node->name); 348 netdev_name_node_free(name_node); 349 } 350 351 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) 352 { 353 struct netdev_name_node *name_node; 354 struct net *net = dev_net(dev); 355 356 name_node = netdev_name_node_lookup(net, name); 357 if (!name_node) 358 return -ENOENT; 359 /* lookup might have found our primary name or a name belonging 360 * to another device. 
361 */ 362 if (name_node == dev->name_node || name_node->dev != dev) 363 return -EINVAL; 364 365 __netdev_name_node_alt_destroy(name_node); 366 367 return 0; 368 } 369 370 static void netdev_name_node_alt_flush(struct net_device *dev) 371 { 372 struct netdev_name_node *name_node, *tmp; 373 374 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) 375 __netdev_name_node_alt_destroy(name_node); 376 } 377 378 /* Device list insertion */ 379 static void list_netdevice(struct net_device *dev) 380 { 381 struct net *net = dev_net(dev); 382 383 ASSERT_RTNL(); 384 385 write_lock(&dev_base_lock); 386 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 387 netdev_name_node_add(net, dev->name_node); 388 hlist_add_head_rcu(&dev->index_hlist, 389 dev_index_hash(net, dev->ifindex)); 390 write_unlock(&dev_base_lock); 391 392 dev_base_seq_inc(net); 393 } 394 395 /* Device list removal 396 * caller must respect a RCU grace period before freeing/reusing dev 397 */ 398 static void unlist_netdevice(struct net_device *dev, bool lock) 399 { 400 ASSERT_RTNL(); 401 402 /* Unlink dev from the device chain */ 403 if (lock) 404 write_lock(&dev_base_lock); 405 list_del_rcu(&dev->dev_list); 406 netdev_name_node_del(dev->name_node); 407 hlist_del_rcu(&dev->index_hlist); 408 if (lock) 409 write_unlock(&dev_base_lock); 410 411 dev_base_seq_inc(dev_net(dev)); 412 } 413 414 /* 415 * Our notifier list 416 */ 417 418 static RAW_NOTIFIER_HEAD(netdev_chain); 419 420 /* 421 * Device drivers call our routines to queue packets here. We empty the 422 * queue in the local softnet handler. 423 */ 424 425 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 426 EXPORT_PER_CPU_SYMBOL(softnet_data); 427 428 #ifdef CONFIG_LOCKDEP 429 /* 430 * register_netdevice() inits txq->_xmit_lock and sets lockdep class 431 * according to dev->type 432 */ 433 static const unsigned short netdev_lock_type[] = { 434 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 435 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 436 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 437 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 438 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 439 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 440 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 441 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 442 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 443 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 444 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 445 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 446 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 447 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 448 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 449 450 static const char *const netdev_lock_name[] = { 451 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 452 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 453 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 454 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 455 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 456 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 457 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 458 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 459 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 460 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 461 "_xmit_PIMREG", 
"_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 462 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 463 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 464 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 465 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 466 467 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 468 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 469 470 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 471 { 472 int i; 473 474 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 475 if (netdev_lock_type[i] == dev_type) 476 return i; 477 /* the last key is used by default */ 478 return ARRAY_SIZE(netdev_lock_type) - 1; 479 } 480 481 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 482 unsigned short dev_type) 483 { 484 int i; 485 486 i = netdev_lock_pos(dev_type); 487 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 488 netdev_lock_name[i]); 489 } 490 491 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 492 { 493 int i; 494 495 i = netdev_lock_pos(dev->type); 496 lockdep_set_class_and_name(&dev->addr_list_lock, 497 &netdev_addr_lock_key[i], 498 netdev_lock_name[i]); 499 } 500 #else 501 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 502 unsigned short dev_type) 503 { 504 } 505 506 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 507 { 508 } 509 #endif 510 511 /******************************************************************************* 512 * 513 * Protocol management and registration routines 514 * 515 *******************************************************************************/ 516 517 518 /* 519 * Add a protocol ID to the list. Now that the input handler is 520 * smarter we can dispense with all the messy stuff that used to be 521 * here. 522 * 523 * BEWARE!!! Protocol handlers, mangling input packets, 524 * MUST BE last in hash buckets and checking protocol handlers 525 * MUST start from promiscuous ptype_all chain in net_bh. 526 * It is true now, do not change it. 527 * Explanation follows: if protocol handler, mangling packet, will 528 * be the first on list, it is not able to sense, that packet 529 * is cloned and should be copied-on-write, so that it will 530 * change it and subsequent readers will get broken packet. 531 * --ANK (980803) 532 */ 533 534 static inline struct list_head *ptype_head(const struct packet_type *pt) 535 { 536 if (pt->type == htons(ETH_P_ALL)) 537 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 538 else 539 return pt->dev ? &pt->dev->ptype_specific : 540 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 541 } 542 543 /** 544 * dev_add_pack - add packet handler 545 * @pt: packet type declaration 546 * 547 * Add a protocol handler to the networking stack. The passed &packet_type 548 * is linked into kernel lists and may not be freed until it has been 549 * removed from the kernel lists. 550 * 551 * This call does not sleep therefore it can not 552 * guarantee all CPU's that are in middle of receiving packets 553 * will see the new packet type (until the next received packet). 
554 */ 555 556 void dev_add_pack(struct packet_type *pt) 557 { 558 struct list_head *head = ptype_head(pt); 559 560 spin_lock(&ptype_lock); 561 list_add_rcu(&pt->list, head); 562 spin_unlock(&ptype_lock); 563 } 564 EXPORT_SYMBOL(dev_add_pack); 565 566 /** 567 * __dev_remove_pack - remove packet handler 568 * @pt: packet type declaration 569 * 570 * Remove a protocol handler that was previously added to the kernel 571 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 572 * from the kernel lists and can be freed or reused once this function 573 * returns. 574 * 575 * The packet type might still be in use by receivers 576 * and must not be freed until after all the CPU's have gone 577 * through a quiescent state. 578 */ 579 void __dev_remove_pack(struct packet_type *pt) 580 { 581 struct list_head *head = ptype_head(pt); 582 struct packet_type *pt1; 583 584 spin_lock(&ptype_lock); 585 586 list_for_each_entry(pt1, head, list) { 587 if (pt == pt1) { 588 list_del_rcu(&pt->list); 589 goto out; 590 } 591 } 592 593 pr_warn("dev_remove_pack: %p not found\n", pt); 594 out: 595 spin_unlock(&ptype_lock); 596 } 597 EXPORT_SYMBOL(__dev_remove_pack); 598 599 /** 600 * dev_remove_pack - remove packet handler 601 * @pt: packet type declaration 602 * 603 * Remove a protocol handler that was previously added to the kernel 604 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 605 * from the kernel lists and can be freed or reused once this function 606 * returns. 607 * 608 * This call sleeps to guarantee that no CPU is looking at the packet 609 * type after return. 610 */ 611 void dev_remove_pack(struct packet_type *pt) 612 { 613 __dev_remove_pack(pt); 614 615 synchronize_net(); 616 } 617 EXPORT_SYMBOL(dev_remove_pack); 618 619 620 /******************************************************************************* 621 * 622 * Device Interface Subroutines 623 * 624 *******************************************************************************/ 625 626 /** 627 * dev_get_iflink - get 'iflink' value of a interface 628 * @dev: targeted interface 629 * 630 * Indicates the ifindex the interface is linked to. 631 * Physical interfaces have the same 'ifindex' and 'iflink' values. 632 */ 633 634 int dev_get_iflink(const struct net_device *dev) 635 { 636 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 637 return dev->netdev_ops->ndo_get_iflink(dev); 638 639 return dev->ifindex; 640 } 641 EXPORT_SYMBOL(dev_get_iflink); 642 643 /** 644 * dev_fill_metadata_dst - Retrieve tunnel egress information. 645 * @dev: targeted interface 646 * @skb: The packet. 647 * 648 * For better visibility of tunnel traffic OVS needs to retrieve 649 * egress tunnel information for a packet. Following API allows 650 * user to get this info. 
651 */ 652 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 653 { 654 struct ip_tunnel_info *info; 655 656 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 657 return -EINVAL; 658 659 info = skb_tunnel_info_unclone(skb); 660 if (!info) 661 return -ENOMEM; 662 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 663 return -EINVAL; 664 665 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 666 } 667 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 668 669 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack) 670 { 671 int k = stack->num_paths++; 672 673 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX)) 674 return NULL; 675 676 return &stack->path[k]; 677 } 678 679 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 680 struct net_device_path_stack *stack) 681 { 682 const struct net_device *last_dev; 683 struct net_device_path_ctx ctx = { 684 .dev = dev, 685 }; 686 struct net_device_path *path; 687 int ret = 0; 688 689 memcpy(ctx.daddr, daddr, sizeof(ctx.daddr)); 690 stack->num_paths = 0; 691 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) { 692 last_dev = ctx.dev; 693 path = dev_fwd_path(stack); 694 if (!path) 695 return -1; 696 697 memset(path, 0, sizeof(struct net_device_path)); 698 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path); 699 if (ret < 0) 700 return -1; 701 702 if (WARN_ON_ONCE(last_dev == ctx.dev)) 703 return -1; 704 } 705 706 if (!ctx.dev) 707 return ret; 708 709 path = dev_fwd_path(stack); 710 if (!path) 711 return -1; 712 path->type = DEV_PATH_ETHERNET; 713 path->dev = ctx.dev; 714 715 return ret; 716 } 717 EXPORT_SYMBOL_GPL(dev_fill_forward_path); 718 719 /** 720 * __dev_get_by_name - find a device by its name 721 * @net: the applicable net namespace 722 * @name: name to find 723 * 724 * Find an interface by name. Must be called under RTNL semaphore 725 * or @dev_base_lock. If the name is found a pointer to the device 726 * is returned. If the name is not found then %NULL is returned. The 727 * reference counters are not incremented so the caller must be 728 * careful with locks. 729 */ 730 731 struct net_device *__dev_get_by_name(struct net *net, const char *name) 732 { 733 struct netdev_name_node *node_name; 734 735 node_name = netdev_name_node_lookup(net, name); 736 return node_name ? node_name->dev : NULL; 737 } 738 EXPORT_SYMBOL(__dev_get_by_name); 739 740 /** 741 * dev_get_by_name_rcu - find a device by its name 742 * @net: the applicable net namespace 743 * @name: name to find 744 * 745 * Find an interface by name. 746 * If the name is found a pointer to the device is returned. 747 * If the name is not found then %NULL is returned. 748 * The reference counters are not incremented so the caller must be 749 * careful with locks. The caller must hold RCU lock. 750 */ 751 752 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 753 { 754 struct netdev_name_node *node_name; 755 756 node_name = netdev_name_node_lookup_rcu(net, name); 757 return node_name ? node_name->dev : NULL; 758 } 759 EXPORT_SYMBOL(dev_get_by_name_rcu); 760 761 /** 762 * dev_get_by_name - find a device by its name 763 * @net: the applicable net namespace 764 * @name: name to find 765 * 766 * Find an interface by name. This can be called from any 767 * context and does its own locking. The returned handle has 768 * the usage count incremented and the caller must use dev_put() to 769 * release it when it is no longer needed. 
%NULL is returned if no 770 * matching device is found. 771 */ 772 773 struct net_device *dev_get_by_name(struct net *net, const char *name) 774 { 775 struct net_device *dev; 776 777 rcu_read_lock(); 778 dev = dev_get_by_name_rcu(net, name); 779 dev_hold(dev); 780 rcu_read_unlock(); 781 return dev; 782 } 783 EXPORT_SYMBOL(dev_get_by_name); 784 785 /** 786 * __dev_get_by_index - find a device by its ifindex 787 * @net: the applicable net namespace 788 * @ifindex: index of device 789 * 790 * Search for an interface by index. Returns %NULL if the device 791 * is not found or a pointer to the device. The device has not 792 * had its reference counter increased so the caller must be careful 793 * about locking. The caller must hold either the RTNL semaphore 794 * or @dev_base_lock. 795 */ 796 797 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 798 { 799 struct net_device *dev; 800 struct hlist_head *head = dev_index_hash(net, ifindex); 801 802 hlist_for_each_entry(dev, head, index_hlist) 803 if (dev->ifindex == ifindex) 804 return dev; 805 806 return NULL; 807 } 808 EXPORT_SYMBOL(__dev_get_by_index); 809 810 /** 811 * dev_get_by_index_rcu - find a device by its ifindex 812 * @net: the applicable net namespace 813 * @ifindex: index of device 814 * 815 * Search for an interface by index. Returns %NULL if the device 816 * is not found or a pointer to the device. The device has not 817 * had its reference counter increased so the caller must be careful 818 * about locking. The caller must hold RCU lock. 819 */ 820 821 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 822 { 823 struct net_device *dev; 824 struct hlist_head *head = dev_index_hash(net, ifindex); 825 826 hlist_for_each_entry_rcu(dev, head, index_hlist) 827 if (dev->ifindex == ifindex) 828 return dev; 829 830 return NULL; 831 } 832 EXPORT_SYMBOL(dev_get_by_index_rcu); 833 834 835 /** 836 * dev_get_by_index - find a device by its ifindex 837 * @net: the applicable net namespace 838 * @ifindex: index of device 839 * 840 * Search for an interface by index. Returns NULL if the device 841 * is not found or a pointer to the device. The device returned has 842 * had a reference added and the pointer is safe until the user calls 843 * dev_put to indicate they have finished with it. 844 */ 845 846 struct net_device *dev_get_by_index(struct net *net, int ifindex) 847 { 848 struct net_device *dev; 849 850 rcu_read_lock(); 851 dev = dev_get_by_index_rcu(net, ifindex); 852 dev_hold(dev); 853 rcu_read_unlock(); 854 return dev; 855 } 856 EXPORT_SYMBOL(dev_get_by_index); 857 858 /** 859 * dev_get_by_napi_id - find a device by napi_id 860 * @napi_id: ID of the NAPI struct 861 * 862 * Search for an interface by NAPI ID. Returns %NULL if the device 863 * is not found or a pointer to the device. The device has not had 864 * its reference counter increased so the caller must be careful 865 * about locking. The caller must hold RCU lock. 866 */ 867 868 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 869 { 870 struct napi_struct *napi; 871 872 WARN_ON_ONCE(!rcu_read_lock_held()); 873 874 if (napi_id < MIN_NAPI_ID) 875 return NULL; 876 877 napi = napi_by_id(napi_id); 878 879 return napi ? napi->dev : NULL; 880 } 881 EXPORT_SYMBOL(dev_get_by_napi_id); 882 883 /** 884 * netdev_get_name - get a netdevice name, knowing its ifindex. 885 * @net: network namespace 886 * @name: a pointer to the buffer where the name will be stored. 887 * @ifindex: the ifindex of the interface to get the name from. 
888 */ 889 int netdev_get_name(struct net *net, char *name, int ifindex) 890 { 891 struct net_device *dev; 892 int ret; 893 894 down_read(&devnet_rename_sem); 895 rcu_read_lock(); 896 897 dev = dev_get_by_index_rcu(net, ifindex); 898 if (!dev) { 899 ret = -ENODEV; 900 goto out; 901 } 902 903 strcpy(name, dev->name); 904 905 ret = 0; 906 out: 907 rcu_read_unlock(); 908 up_read(&devnet_rename_sem); 909 return ret; 910 } 911 912 /** 913 * dev_getbyhwaddr_rcu - find a device by its hardware address 914 * @net: the applicable net namespace 915 * @type: media type of device 916 * @ha: hardware address 917 * 918 * Search for an interface by MAC address. Returns NULL if the device 919 * is not found or a pointer to the device. 920 * The caller must hold RCU or RTNL. 921 * The returned device has not had its ref count increased 922 * and the caller must therefore be careful about locking 923 * 924 */ 925 926 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 927 const char *ha) 928 { 929 struct net_device *dev; 930 931 for_each_netdev_rcu(net, dev) 932 if (dev->type == type && 933 !memcmp(dev->dev_addr, ha, dev->addr_len)) 934 return dev; 935 936 return NULL; 937 } 938 EXPORT_SYMBOL(dev_getbyhwaddr_rcu); 939 940 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 941 { 942 struct net_device *dev, *ret = NULL; 943 944 rcu_read_lock(); 945 for_each_netdev_rcu(net, dev) 946 if (dev->type == type) { 947 dev_hold(dev); 948 ret = dev; 949 break; 950 } 951 rcu_read_unlock(); 952 return ret; 953 } 954 EXPORT_SYMBOL(dev_getfirstbyhwtype); 955 956 /** 957 * __dev_get_by_flags - find any device with given flags 958 * @net: the applicable net namespace 959 * @if_flags: IFF_* values 960 * @mask: bitmask of bits in if_flags to check 961 * 962 * Search for any interface with the given flags. Returns NULL if a device 963 * is not found or a pointer to the device. Must be called inside 964 * rtnl_lock(), and result refcount is unchanged. 965 */ 966 967 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, 968 unsigned short mask) 969 { 970 struct net_device *dev, *ret; 971 972 ASSERT_RTNL(); 973 974 ret = NULL; 975 for_each_netdev(net, dev) { 976 if (((dev->flags ^ if_flags) & mask) == 0) { 977 ret = dev; 978 break; 979 } 980 } 981 return ret; 982 } 983 EXPORT_SYMBOL(__dev_get_by_flags); 984 985 /** 986 * dev_valid_name - check if name is okay for network device 987 * @name: name string 988 * 989 * Network device names need to be valid file names to 990 * allow sysfs to work. We also disallow any kind of 991 * whitespace. 992 */ 993 bool dev_valid_name(const char *name) 994 { 995 if (*name == '\0') 996 return false; 997 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 998 return false; 999 if (!strcmp(name, ".") || !strcmp(name, "..")) 1000 return false; 1001 1002 while (*name) { 1003 if (*name == '/' || *name == ':' || isspace(*name)) 1004 return false; 1005 name++; 1006 } 1007 return true; 1008 } 1009 EXPORT_SYMBOL(dev_valid_name); 1010 1011 /** 1012 * __dev_alloc_name - allocate a name for a device 1013 * @net: network namespace to allocate the device name in 1014 * @name: name format string 1015 * @buf: scratch buffer and result name string 1016 * 1017 * Passed a format string - eg "lt%d" it will try and find a suitable 1018 * id. It scans list of devices to build up a free map, then chooses 1019 * the first empty slot. 
The caller must hold the dev_base or rtnl lock 1020 * while allocating the name and adding the device in order to avoid 1021 * duplicates. 1022 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1023 * Returns the number of the unit assigned or a negative errno code. 1024 */ 1025 1026 static int __dev_alloc_name(struct net *net, const char *name, char *buf) 1027 { 1028 int i = 0; 1029 const char *p; 1030 const int max_netdevices = 8*PAGE_SIZE; 1031 unsigned long *inuse; 1032 struct net_device *d; 1033 1034 if (!dev_valid_name(name)) 1035 return -EINVAL; 1036 1037 p = strchr(name, '%'); 1038 if (p) { 1039 /* 1040 * Verify the string as this thing may have come from 1041 * the user. There must be either one "%d" and no other "%" 1042 * characters. 1043 */ 1044 if (p[1] != 'd' || strchr(p + 2, '%')) 1045 return -EINVAL; 1046 1047 /* Use one page as a bit array of possible slots */ 1048 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 1049 if (!inuse) 1050 return -ENOMEM; 1051 1052 for_each_netdev(net, d) { 1053 struct netdev_name_node *name_node; 1054 list_for_each_entry(name_node, &d->name_node->list, list) { 1055 if (!sscanf(name_node->name, name, &i)) 1056 continue; 1057 if (i < 0 || i >= max_netdevices) 1058 continue; 1059 1060 /* avoid cases where sscanf is not exact inverse of printf */ 1061 snprintf(buf, IFNAMSIZ, name, i); 1062 if (!strncmp(buf, name_node->name, IFNAMSIZ)) 1063 __set_bit(i, inuse); 1064 } 1065 if (!sscanf(d->name, name, &i)) 1066 continue; 1067 if (i < 0 || i >= max_netdevices) 1068 continue; 1069 1070 /* avoid cases where sscanf is not exact inverse of printf */ 1071 snprintf(buf, IFNAMSIZ, name, i); 1072 if (!strncmp(buf, d->name, IFNAMSIZ)) 1073 __set_bit(i, inuse); 1074 } 1075 1076 i = find_first_zero_bit(inuse, max_netdevices); 1077 free_page((unsigned long) inuse); 1078 } 1079 1080 snprintf(buf, IFNAMSIZ, name, i); 1081 if (!netdev_name_in_use(net, buf)) 1082 return i; 1083 1084 /* It is possible to run out of possible slots 1085 * when the name is long and there isn't enough space left 1086 * for the digits, or if all bits are used. 1087 */ 1088 return -ENFILE; 1089 } 1090 1091 static int dev_alloc_name_ns(struct net *net, 1092 struct net_device *dev, 1093 const char *name) 1094 { 1095 char buf[IFNAMSIZ]; 1096 int ret; 1097 1098 BUG_ON(!net); 1099 ret = __dev_alloc_name(net, name, buf); 1100 if (ret >= 0) 1101 strscpy(dev->name, buf, IFNAMSIZ); 1102 return ret; 1103 } 1104 1105 /** 1106 * dev_alloc_name - allocate a name for a device 1107 * @dev: device 1108 * @name: name format string 1109 * 1110 * Passed a format string - eg "lt%d" it will try and find a suitable 1111 * id. It scans list of devices to build up a free map, then chooses 1112 * the first empty slot. The caller must hold the dev_base or rtnl lock 1113 * while allocating the name and adding the device in order to avoid 1114 * duplicates. 1115 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1116 * Returns the number of the unit assigned or a negative errno code. 
1117 */ 1118 1119 int dev_alloc_name(struct net_device *dev, const char *name) 1120 { 1121 return dev_alloc_name_ns(dev_net(dev), dev, name); 1122 } 1123 EXPORT_SYMBOL(dev_alloc_name); 1124 1125 static int dev_get_valid_name(struct net *net, struct net_device *dev, 1126 const char *name) 1127 { 1128 BUG_ON(!net); 1129 1130 if (!dev_valid_name(name)) 1131 return -EINVAL; 1132 1133 if (strchr(name, '%')) 1134 return dev_alloc_name_ns(net, dev, name); 1135 else if (netdev_name_in_use(net, name)) 1136 return -EEXIST; 1137 else if (dev->name != name) 1138 strscpy(dev->name, name, IFNAMSIZ); 1139 1140 return 0; 1141 } 1142 1143 /** 1144 * dev_change_name - change name of a device 1145 * @dev: device 1146 * @newname: name (or format string) must be at least IFNAMSIZ 1147 * 1148 * Change name of a device, can pass format strings "eth%d". 1149 * for wildcarding. 1150 */ 1151 int dev_change_name(struct net_device *dev, const char *newname) 1152 { 1153 unsigned char old_assign_type; 1154 char oldname[IFNAMSIZ]; 1155 int err = 0; 1156 int ret; 1157 struct net *net; 1158 1159 ASSERT_RTNL(); 1160 BUG_ON(!dev_net(dev)); 1161 1162 net = dev_net(dev); 1163 1164 down_write(&devnet_rename_sem); 1165 1166 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1167 up_write(&devnet_rename_sem); 1168 return 0; 1169 } 1170 1171 memcpy(oldname, dev->name, IFNAMSIZ); 1172 1173 err = dev_get_valid_name(net, dev, newname); 1174 if (err < 0) { 1175 up_write(&devnet_rename_sem); 1176 return err; 1177 } 1178 1179 if (oldname[0] && !strchr(oldname, '%')) 1180 netdev_info(dev, "renamed from %s%s\n", oldname, 1181 dev->flags & IFF_UP ? " (while UP)" : ""); 1182 1183 old_assign_type = dev->name_assign_type; 1184 dev->name_assign_type = NET_NAME_RENAMED; 1185 1186 rollback: 1187 ret = device_rename(&dev->dev, dev->name); 1188 if (ret) { 1189 memcpy(dev->name, oldname, IFNAMSIZ); 1190 dev->name_assign_type = old_assign_type; 1191 up_write(&devnet_rename_sem); 1192 return ret; 1193 } 1194 1195 up_write(&devnet_rename_sem); 1196 1197 netdev_adjacent_rename_links(dev, oldname); 1198 1199 write_lock(&dev_base_lock); 1200 netdev_name_node_del(dev->name_node); 1201 write_unlock(&dev_base_lock); 1202 1203 synchronize_rcu(); 1204 1205 write_lock(&dev_base_lock); 1206 netdev_name_node_add(net, dev->name_node); 1207 write_unlock(&dev_base_lock); 1208 1209 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1210 ret = notifier_to_errno(ret); 1211 1212 if (ret) { 1213 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1214 if (err >= 0) { 1215 err = ret; 1216 down_write(&devnet_rename_sem); 1217 memcpy(dev->name, oldname, IFNAMSIZ); 1218 memcpy(oldname, newname, IFNAMSIZ); 1219 dev->name_assign_type = old_assign_type; 1220 old_assign_type = NET_NAME_RENAMED; 1221 goto rollback; 1222 } else { 1223 netdev_err(dev, "name change rollback failed: %d\n", 1224 ret); 1225 } 1226 } 1227 1228 return err; 1229 } 1230 1231 /** 1232 * dev_set_alias - change ifalias of a device 1233 * @dev: device 1234 * @alias: name up to IFALIASZ 1235 * @len: limit of bytes to copy from info 1236 * 1237 * Set ifalias for a device, 1238 */ 1239 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1240 { 1241 struct dev_ifalias *new_alias = NULL; 1242 1243 if (len >= IFALIASZ) 1244 return -EINVAL; 1245 1246 if (len) { 1247 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1248 if (!new_alias) 1249 return -ENOMEM; 1250 1251 memcpy(new_alias->ifalias, alias, len); 1252 new_alias->ifalias[len] = 0; 1253 } 1254 1255 
mutex_lock(&ifalias_mutex); 1256 new_alias = rcu_replace_pointer(dev->ifalias, new_alias, 1257 mutex_is_locked(&ifalias_mutex)); 1258 mutex_unlock(&ifalias_mutex); 1259 1260 if (new_alias) 1261 kfree_rcu(new_alias, rcuhead); 1262 1263 return len; 1264 } 1265 EXPORT_SYMBOL(dev_set_alias); 1266 1267 /** 1268 * dev_get_alias - get ifalias of a device 1269 * @dev: device 1270 * @name: buffer to store name of ifalias 1271 * @len: size of buffer 1272 * 1273 * get ifalias for a device. Caller must make sure dev cannot go 1274 * away, e.g. rcu read lock or own a reference count to device. 1275 */ 1276 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1277 { 1278 const struct dev_ifalias *alias; 1279 int ret = 0; 1280 1281 rcu_read_lock(); 1282 alias = rcu_dereference(dev->ifalias); 1283 if (alias) 1284 ret = snprintf(name, len, "%s", alias->ifalias); 1285 rcu_read_unlock(); 1286 1287 return ret; 1288 } 1289 1290 /** 1291 * netdev_features_change - device changes features 1292 * @dev: device to cause notification 1293 * 1294 * Called to indicate a device has changed features. 1295 */ 1296 void netdev_features_change(struct net_device *dev) 1297 { 1298 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1299 } 1300 EXPORT_SYMBOL(netdev_features_change); 1301 1302 /** 1303 * netdev_state_change - device changes state 1304 * @dev: device to cause notification 1305 * 1306 * Called to indicate a device has changed state. This function calls 1307 * the notifier chains for netdev_chain and sends a NEWLINK message 1308 * to the routing socket. 1309 */ 1310 void netdev_state_change(struct net_device *dev) 1311 { 1312 if (dev->flags & IFF_UP) { 1313 struct netdev_notifier_change_info change_info = { 1314 .info.dev = dev, 1315 }; 1316 1317 call_netdevice_notifiers_info(NETDEV_CHANGE, 1318 &change_info.info); 1319 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL); 1320 } 1321 } 1322 EXPORT_SYMBOL(netdev_state_change); 1323 1324 /** 1325 * __netdev_notify_peers - notify network peers about existence of @dev, 1326 * to be called when rtnl lock is already held. 1327 * @dev: network device 1328 * 1329 * Generate traffic such that interested network peers are aware of 1330 * @dev, such as by generating a gratuitous ARP. This may be used when 1331 * a device wants to inform the rest of the network about some sort of 1332 * reconfiguration such as a failover event or virtual machine 1333 * migration. 1334 */ 1335 void __netdev_notify_peers(struct net_device *dev) 1336 { 1337 ASSERT_RTNL(); 1338 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1339 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1340 } 1341 EXPORT_SYMBOL(__netdev_notify_peers); 1342 1343 /** 1344 * netdev_notify_peers - notify network peers about existence of @dev 1345 * @dev: network device 1346 * 1347 * Generate traffic such that interested network peers are aware of 1348 * @dev, such as by generating a gratuitous ARP. This may be used when 1349 * a device wants to inform the rest of the network about some sort of 1350 * reconfiguration such as a failover event or virtual machine 1351 * migration. 
1352 */ 1353 void netdev_notify_peers(struct net_device *dev) 1354 { 1355 rtnl_lock(); 1356 __netdev_notify_peers(dev); 1357 rtnl_unlock(); 1358 } 1359 EXPORT_SYMBOL(netdev_notify_peers); 1360 1361 static int napi_threaded_poll(void *data); 1362 1363 static int napi_kthread_create(struct napi_struct *n) 1364 { 1365 int err = 0; 1366 1367 /* Create and wake up the kthread once to put it in 1368 * TASK_INTERRUPTIBLE mode to avoid the blocked task 1369 * warning and work with loadavg. 1370 */ 1371 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", 1372 n->dev->name, n->napi_id); 1373 if (IS_ERR(n->thread)) { 1374 err = PTR_ERR(n->thread); 1375 pr_err("kthread_run failed with err %d\n", err); 1376 n->thread = NULL; 1377 } 1378 1379 return err; 1380 } 1381 1382 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1383 { 1384 const struct net_device_ops *ops = dev->netdev_ops; 1385 int ret; 1386 1387 ASSERT_RTNL(); 1388 dev_addr_check(dev); 1389 1390 if (!netif_device_present(dev)) { 1391 /* may be detached because parent is runtime-suspended */ 1392 if (dev->dev.parent) 1393 pm_runtime_resume(dev->dev.parent); 1394 if (!netif_device_present(dev)) 1395 return -ENODEV; 1396 } 1397 1398 /* Block netpoll from trying to do any rx path servicing. 1399 * If we don't do this there is a chance ndo_poll_controller 1400 * or ndo_poll may be running while we open the device 1401 */ 1402 netpoll_poll_disable(dev); 1403 1404 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1405 ret = notifier_to_errno(ret); 1406 if (ret) 1407 return ret; 1408 1409 set_bit(__LINK_STATE_START, &dev->state); 1410 1411 if (ops->ndo_validate_addr) 1412 ret = ops->ndo_validate_addr(dev); 1413 1414 if (!ret && ops->ndo_open) 1415 ret = ops->ndo_open(dev); 1416 1417 netpoll_poll_enable(dev); 1418 1419 if (ret) 1420 clear_bit(__LINK_STATE_START, &dev->state); 1421 else { 1422 dev->flags |= IFF_UP; 1423 dev_set_rx_mode(dev); 1424 dev_activate(dev); 1425 add_device_randomness(dev->dev_addr, dev->addr_len); 1426 } 1427 1428 return ret; 1429 } 1430 1431 /** 1432 * dev_open - prepare an interface for use. 1433 * @dev: device to open 1434 * @extack: netlink extended ack 1435 * 1436 * Takes a device from down to up state. The device's private open 1437 * function is invoked and then the multicast lists are loaded. Finally 1438 * the device is moved into the up state and a %NETDEV_UP message is 1439 * sent to the netdev notifier chain. 1440 * 1441 * Calling this function on an active interface is a nop. On a failure 1442 * a negative errno code is returned. 1443 */ 1444 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1445 { 1446 int ret; 1447 1448 if (dev->flags & IFF_UP) 1449 return 0; 1450 1451 ret = __dev_open(dev, extack); 1452 if (ret < 0) 1453 return ret; 1454 1455 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); 1456 call_netdevice_notifiers(NETDEV_UP, dev); 1457 1458 return ret; 1459 } 1460 EXPORT_SYMBOL(dev_open); 1461 1462 static void __dev_close_many(struct list_head *head) 1463 { 1464 struct net_device *dev; 1465 1466 ASSERT_RTNL(); 1467 might_sleep(); 1468 1469 list_for_each_entry(dev, head, close_list) { 1470 /* Temporarily disable netpoll until the interface is down */ 1471 netpoll_poll_disable(dev); 1472 1473 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1474 1475 clear_bit(__LINK_STATE_START, &dev->state); 1476 1477 /* Synchronize to scheduled poll. We cannot touch poll list, it 1478 * can be even on different cpu. 
So just clear netif_running(). 1479 * 1480 * dev->stop() will invoke napi_disable() on all of it's 1481 * napi_struct instances on this device. 1482 */ 1483 smp_mb__after_atomic(); /* Commit netif_running(). */ 1484 } 1485 1486 dev_deactivate_many(head); 1487 1488 list_for_each_entry(dev, head, close_list) { 1489 const struct net_device_ops *ops = dev->netdev_ops; 1490 1491 /* 1492 * Call the device specific close. This cannot fail. 1493 * Only if device is UP 1494 * 1495 * We allow it to be called even after a DETACH hot-plug 1496 * event. 1497 */ 1498 if (ops->ndo_stop) 1499 ops->ndo_stop(dev); 1500 1501 dev->flags &= ~IFF_UP; 1502 netpoll_poll_enable(dev); 1503 } 1504 } 1505 1506 static void __dev_close(struct net_device *dev) 1507 { 1508 LIST_HEAD(single); 1509 1510 list_add(&dev->close_list, &single); 1511 __dev_close_many(&single); 1512 list_del(&single); 1513 } 1514 1515 void dev_close_many(struct list_head *head, bool unlink) 1516 { 1517 struct net_device *dev, *tmp; 1518 1519 /* Remove the devices that don't need to be closed */ 1520 list_for_each_entry_safe(dev, tmp, head, close_list) 1521 if (!(dev->flags & IFF_UP)) 1522 list_del_init(&dev->close_list); 1523 1524 __dev_close_many(head); 1525 1526 list_for_each_entry_safe(dev, tmp, head, close_list) { 1527 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL); 1528 call_netdevice_notifiers(NETDEV_DOWN, dev); 1529 if (unlink) 1530 list_del_init(&dev->close_list); 1531 } 1532 } 1533 EXPORT_SYMBOL(dev_close_many); 1534 1535 /** 1536 * dev_close - shutdown an interface. 1537 * @dev: device to shutdown 1538 * 1539 * This function moves an active device into down state. A 1540 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1541 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1542 * chain. 1543 */ 1544 void dev_close(struct net_device *dev) 1545 { 1546 if (dev->flags & IFF_UP) { 1547 LIST_HEAD(single); 1548 1549 list_add(&dev->close_list, &single); 1550 dev_close_many(&single, true); 1551 list_del(&single); 1552 } 1553 } 1554 EXPORT_SYMBOL(dev_close); 1555 1556 1557 /** 1558 * dev_disable_lro - disable Large Receive Offload on a device 1559 * @dev: device 1560 * 1561 * Disable Large Receive Offload (LRO) on a net device. Must be 1562 * called under RTNL. This is needed if received packets may be 1563 * forwarded to another interface. 1564 */ 1565 void dev_disable_lro(struct net_device *dev) 1566 { 1567 struct net_device *lower_dev; 1568 struct list_head *iter; 1569 1570 dev->wanted_features &= ~NETIF_F_LRO; 1571 netdev_update_features(dev); 1572 1573 if (unlikely(dev->features & NETIF_F_LRO)) 1574 netdev_WARN(dev, "failed to disable LRO!\n"); 1575 1576 netdev_for_each_lower_dev(dev, lower_dev, iter) 1577 dev_disable_lro(lower_dev); 1578 } 1579 EXPORT_SYMBOL(dev_disable_lro); 1580 1581 /** 1582 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1583 * @dev: device 1584 * 1585 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1586 * called under RTNL. This is needed if Generic XDP is installed on 1587 * the device. 
1588 */ 1589 static void dev_disable_gro_hw(struct net_device *dev) 1590 { 1591 dev->wanted_features &= ~NETIF_F_GRO_HW; 1592 netdev_update_features(dev); 1593 1594 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1595 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1596 } 1597 1598 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1599 { 1600 #define N(val) \ 1601 case NETDEV_##val: \ 1602 return "NETDEV_" __stringify(val); 1603 switch (cmd) { 1604 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1605 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1606 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1607 N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) 1608 N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) 1609 N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE) 1610 N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1611 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1612 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1613 N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) 1614 N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) 1615 N(XDP_FEAT_CHANGE) N(PRE_CHANGE_HWTSTAMP) 1616 } 1617 #undef N 1618 return "UNKNOWN_NETDEV_EVENT"; 1619 } 1620 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1621 1622 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1623 struct net_device *dev) 1624 { 1625 struct netdev_notifier_info info = { 1626 .dev = dev, 1627 }; 1628 1629 return nb->notifier_call(nb, val, &info); 1630 } 1631 1632 static int call_netdevice_register_notifiers(struct notifier_block *nb, 1633 struct net_device *dev) 1634 { 1635 int err; 1636 1637 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1638 err = notifier_to_errno(err); 1639 if (err) 1640 return err; 1641 1642 if (!(dev->flags & IFF_UP)) 1643 return 0; 1644 1645 call_netdevice_notifier(nb, NETDEV_UP, dev); 1646 return 0; 1647 } 1648 1649 static void call_netdevice_unregister_notifiers(struct notifier_block *nb, 1650 struct net_device *dev) 1651 { 1652 if (dev->flags & IFF_UP) { 1653 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1654 dev); 1655 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1656 } 1657 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1658 } 1659 1660 static int call_netdevice_register_net_notifiers(struct notifier_block *nb, 1661 struct net *net) 1662 { 1663 struct net_device *dev; 1664 int err; 1665 1666 for_each_netdev(net, dev) { 1667 err = call_netdevice_register_notifiers(nb, dev); 1668 if (err) 1669 goto rollback; 1670 } 1671 return 0; 1672 1673 rollback: 1674 for_each_netdev_continue_reverse(net, dev) 1675 call_netdevice_unregister_notifiers(nb, dev); 1676 return err; 1677 } 1678 1679 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb, 1680 struct net *net) 1681 { 1682 struct net_device *dev; 1683 1684 for_each_netdev(net, dev) 1685 call_netdevice_unregister_notifiers(nb, dev); 1686 } 1687 1688 static int dev_boot_phase = 1; 1689 1690 /** 1691 * register_netdevice_notifier - register a network notifier block 1692 * @nb: notifier 1693 * 1694 * Register a notifier to be called when network device events occur. 1695 * The notifier passed is linked into the kernel structures and must 1696 * not be reused until it has been unregistered. A negative errno code 1697 * is returned on a failure. 
1698 * 1699 * When registered all registration and up events are replayed 1700 * to the new notifier to allow device to have a race free 1701 * view of the network device list. 1702 */ 1703 1704 int register_netdevice_notifier(struct notifier_block *nb) 1705 { 1706 struct net *net; 1707 int err; 1708 1709 /* Close race with setup_net() and cleanup_net() */ 1710 down_write(&pernet_ops_rwsem); 1711 rtnl_lock(); 1712 err = raw_notifier_chain_register(&netdev_chain, nb); 1713 if (err) 1714 goto unlock; 1715 if (dev_boot_phase) 1716 goto unlock; 1717 for_each_net(net) { 1718 err = call_netdevice_register_net_notifiers(nb, net); 1719 if (err) 1720 goto rollback; 1721 } 1722 1723 unlock: 1724 rtnl_unlock(); 1725 up_write(&pernet_ops_rwsem); 1726 return err; 1727 1728 rollback: 1729 for_each_net_continue_reverse(net) 1730 call_netdevice_unregister_net_notifiers(nb, net); 1731 1732 raw_notifier_chain_unregister(&netdev_chain, nb); 1733 goto unlock; 1734 } 1735 EXPORT_SYMBOL(register_netdevice_notifier); 1736 1737 /** 1738 * unregister_netdevice_notifier - unregister a network notifier block 1739 * @nb: notifier 1740 * 1741 * Unregister a notifier previously registered by 1742 * register_netdevice_notifier(). The notifier is unlinked into the 1743 * kernel structures and may then be reused. A negative errno code 1744 * is returned on a failure. 1745 * 1746 * After unregistering unregister and down device events are synthesized 1747 * for all devices on the device list to the removed notifier to remove 1748 * the need for special case cleanup code. 1749 */ 1750 1751 int unregister_netdevice_notifier(struct notifier_block *nb) 1752 { 1753 struct net *net; 1754 int err; 1755 1756 /* Close race with setup_net() and cleanup_net() */ 1757 down_write(&pernet_ops_rwsem); 1758 rtnl_lock(); 1759 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1760 if (err) 1761 goto unlock; 1762 1763 for_each_net(net) 1764 call_netdevice_unregister_net_notifiers(nb, net); 1765 1766 unlock: 1767 rtnl_unlock(); 1768 up_write(&pernet_ops_rwsem); 1769 return err; 1770 } 1771 EXPORT_SYMBOL(unregister_netdevice_notifier); 1772 1773 static int __register_netdevice_notifier_net(struct net *net, 1774 struct notifier_block *nb, 1775 bool ignore_call_fail) 1776 { 1777 int err; 1778 1779 err = raw_notifier_chain_register(&net->netdev_chain, nb); 1780 if (err) 1781 return err; 1782 if (dev_boot_phase) 1783 return 0; 1784 1785 err = call_netdevice_register_net_notifiers(nb, net); 1786 if (err && !ignore_call_fail) 1787 goto chain_unregister; 1788 1789 return 0; 1790 1791 chain_unregister: 1792 raw_notifier_chain_unregister(&net->netdev_chain, nb); 1793 return err; 1794 } 1795 1796 static int __unregister_netdevice_notifier_net(struct net *net, 1797 struct notifier_block *nb) 1798 { 1799 int err; 1800 1801 err = raw_notifier_chain_unregister(&net->netdev_chain, nb); 1802 if (err) 1803 return err; 1804 1805 call_netdevice_unregister_net_notifiers(nb, net); 1806 return 0; 1807 } 1808 1809 /** 1810 * register_netdevice_notifier_net - register a per-netns network notifier block 1811 * @net: network namespace 1812 * @nb: notifier 1813 * 1814 * Register a notifier to be called when network device events occur. 1815 * The notifier passed is linked into the kernel structures and must 1816 * not be reused until it has been unregistered. A negative errno code 1817 * is returned on a failure. 
1818 * 1819 * When registered all registration and up events are replayed 1820 * to the new notifier to allow device to have a race free 1821 * view of the network device list. 1822 */ 1823 1824 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) 1825 { 1826 int err; 1827 1828 rtnl_lock(); 1829 err = __register_netdevice_notifier_net(net, nb, false); 1830 rtnl_unlock(); 1831 return err; 1832 } 1833 EXPORT_SYMBOL(register_netdevice_notifier_net); 1834 1835 /** 1836 * unregister_netdevice_notifier_net - unregister a per-netns 1837 * network notifier block 1838 * @net: network namespace 1839 * @nb: notifier 1840 * 1841 * Unregister a notifier previously registered by 1842 * register_netdevice_notifier_net(). The notifier is unlinked from the 1843 * kernel structures and may then be reused. A negative errno code 1844 * is returned on a failure. 1845 * 1846 * After unregistering unregister and down device events are synthesized 1847 * for all devices on the device list to the removed notifier to remove 1848 * the need for special case cleanup code. 1849 */ 1850 1851 int unregister_netdevice_notifier_net(struct net *net, 1852 struct notifier_block *nb) 1853 { 1854 int err; 1855 1856 rtnl_lock(); 1857 err = __unregister_netdevice_notifier_net(net, nb); 1858 rtnl_unlock(); 1859 return err; 1860 } 1861 EXPORT_SYMBOL(unregister_netdevice_notifier_net); 1862 1863 static void __move_netdevice_notifier_net(struct net *src_net, 1864 struct net *dst_net, 1865 struct notifier_block *nb) 1866 { 1867 __unregister_netdevice_notifier_net(src_net, nb); 1868 __register_netdevice_notifier_net(dst_net, nb, true); 1869 } 1870 1871 int register_netdevice_notifier_dev_net(struct net_device *dev, 1872 struct notifier_block *nb, 1873 struct netdev_net_notifier *nn) 1874 { 1875 int err; 1876 1877 rtnl_lock(); 1878 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); 1879 if (!err) { 1880 nn->nb = nb; 1881 list_add(&nn->list, &dev->net_notifier_list); 1882 } 1883 rtnl_unlock(); 1884 return err; 1885 } 1886 EXPORT_SYMBOL(register_netdevice_notifier_dev_net); 1887 1888 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 1889 struct notifier_block *nb, 1890 struct netdev_net_notifier *nn) 1891 { 1892 int err; 1893 1894 rtnl_lock(); 1895 list_del(&nn->list); 1896 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); 1897 rtnl_unlock(); 1898 return err; 1899 } 1900 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); 1901 1902 static void move_netdevice_notifiers_dev_net(struct net_device *dev, 1903 struct net *net) 1904 { 1905 struct netdev_net_notifier *nn; 1906 1907 list_for_each_entry(nn, &dev->net_notifier_list, list) 1908 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb); 1909 } 1910 1911 /** 1912 * call_netdevice_notifiers_info - call all network notifier blocks 1913 * @val: value passed unmodified to notifier function 1914 * @info: notifier information data 1915 * 1916 * Call all network notifier blocks. Parameters and return value 1917 * are as for raw_notifier_call_chain(). 1918 */ 1919 1920 int call_netdevice_notifiers_info(unsigned long val, 1921 struct netdev_notifier_info *info) 1922 { 1923 struct net *net = dev_net(info->dev); 1924 int ret; 1925 1926 ASSERT_RTNL(); 1927 1928 /* Run per-netns notifier block chain first, then run the global one. 1929 * Hopefully, one day, the global one is going to be removed after 1930 * all notifier block registrators get converted to be per-netns. 
1931 */ 1932 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1933 if (ret & NOTIFY_STOP_MASK) 1934 return ret; 1935 return raw_notifier_call_chain(&netdev_chain, val, info); 1936 } 1937 1938 /** 1939 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks 1940 * and roll back on error 1941 * @val_up: value passed unmodified to notifier function 1942 * @val_down: value passed unmodified to the notifier function when 1943 * recovering from an error on @val_up 1944 * @info: notifier information data 1945 * 1946 * Call all per-netns network notifier blocks, but not notifier blocks on 1947 * the global notifier chain. Parameters and return value are as for 1948 * raw_notifier_call_chain_robust(). 1949 */ 1950 1951 static int 1952 call_netdevice_notifiers_info_robust(unsigned long val_up, 1953 unsigned long val_down, 1954 struct netdev_notifier_info *info) 1955 { 1956 struct net *net = dev_net(info->dev); 1957 1958 ASSERT_RTNL(); 1959 1960 return raw_notifier_call_chain_robust(&net->netdev_chain, 1961 val_up, val_down, info); 1962 } 1963 1964 static int call_netdevice_notifiers_extack(unsigned long val, 1965 struct net_device *dev, 1966 struct netlink_ext_ack *extack) 1967 { 1968 struct netdev_notifier_info info = { 1969 .dev = dev, 1970 .extack = extack, 1971 }; 1972 1973 return call_netdevice_notifiers_info(val, &info); 1974 } 1975 1976 /** 1977 * call_netdevice_notifiers - call all network notifier blocks 1978 * @val: value passed unmodified to notifier function 1979 * @dev: net_device pointer passed unmodified to notifier function 1980 * 1981 * Call all network notifier blocks. Parameters and return value 1982 * are as for raw_notifier_call_chain(). 1983 */ 1984 1985 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1986 { 1987 return call_netdevice_notifiers_extack(val, dev, NULL); 1988 } 1989 EXPORT_SYMBOL(call_netdevice_notifiers); 1990 1991 /** 1992 * call_netdevice_notifiers_mtu - call all network notifier blocks 1993 * @val: value passed unmodified to notifier function 1994 * @dev: net_device pointer passed unmodified to notifier function 1995 * @arg: additional u32 argument passed to the notifier function 1996 * 1997 * Call all network notifier blocks. Parameters and return value 1998 * are as for raw_notifier_call_chain().
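 *
 * A hedged sketch of how a hypothetical listener could read the value
 * back; the cast relies on @info being the first member of
 * struct netdev_notifier_info_ext, which the BUILD_BUG_ON() below
 * enforces (my_mtu_event is illustrative, not an existing handler):
 *
 *	static int my_mtu_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct netdev_notifier_info_ext *ext = ptr;
 *
 *		if (event == NETDEV_CHANGEMTU)
 *			pr_info("%s: notified mtu %u\n",
 *				netdev_notifier_info_to_dev(ptr)->name,
 *				ext->ext.mtu);
 *		return NOTIFY_DONE;
 *	}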
1999 */ 2000 static int call_netdevice_notifiers_mtu(unsigned long val, 2001 struct net_device *dev, u32 arg) 2002 { 2003 struct netdev_notifier_info_ext info = { 2004 .info.dev = dev, 2005 .ext.mtu = arg, 2006 }; 2007 2008 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2009 2010 return call_netdevice_notifiers_info(val, &info.info); 2011 } 2012 2013 #ifdef CONFIG_NET_INGRESS 2014 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2015 2016 void net_inc_ingress_queue(void) 2017 { 2018 static_branch_inc(&ingress_needed_key); 2019 } 2020 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2021 2022 void net_dec_ingress_queue(void) 2023 { 2024 static_branch_dec(&ingress_needed_key); 2025 } 2026 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2027 #endif 2028 2029 #ifdef CONFIG_NET_EGRESS 2030 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2031 2032 void net_inc_egress_queue(void) 2033 { 2034 static_branch_inc(&egress_needed_key); 2035 } 2036 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2037 2038 void net_dec_egress_queue(void) 2039 { 2040 static_branch_dec(&egress_needed_key); 2041 } 2042 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2043 #endif 2044 2045 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2046 EXPORT_SYMBOL(netstamp_needed_key); 2047 #ifdef CONFIG_JUMP_LABEL 2048 static atomic_t netstamp_needed_deferred; 2049 static atomic_t netstamp_wanted; 2050 static void netstamp_clear(struct work_struct *work) 2051 { 2052 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2053 int wanted; 2054 2055 wanted = atomic_add_return(deferred, &netstamp_wanted); 2056 if (wanted > 0) 2057 static_branch_enable(&netstamp_needed_key); 2058 else 2059 static_branch_disable(&netstamp_needed_key); 2060 } 2061 static DECLARE_WORK(netstamp_work, netstamp_clear); 2062 #endif 2063 2064 void net_enable_timestamp(void) 2065 { 2066 #ifdef CONFIG_JUMP_LABEL 2067 int wanted = atomic_read(&netstamp_wanted); 2068 2069 while (wanted > 0) { 2070 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1)) 2071 return; 2072 } 2073 atomic_inc(&netstamp_needed_deferred); 2074 schedule_work(&netstamp_work); 2075 #else 2076 static_branch_inc(&netstamp_needed_key); 2077 #endif 2078 } 2079 EXPORT_SYMBOL(net_enable_timestamp); 2080 2081 void net_disable_timestamp(void) 2082 { 2083 #ifdef CONFIG_JUMP_LABEL 2084 int wanted = atomic_read(&netstamp_wanted); 2085 2086 while (wanted > 1) { 2087 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1)) 2088 return; 2089 } 2090 atomic_dec(&netstamp_needed_deferred); 2091 schedule_work(&netstamp_work); 2092 #else 2093 static_branch_dec(&netstamp_needed_key); 2094 #endif 2095 } 2096 EXPORT_SYMBOL(net_disable_timestamp); 2097 2098 static inline void net_timestamp_set(struct sk_buff *skb) 2099 { 2100 skb->tstamp = 0; 2101 skb->mono_delivery_time = 0; 2102 if (static_branch_unlikely(&netstamp_needed_key)) 2103 skb->tstamp = ktime_get_real(); 2104 } 2105 2106 #define net_timestamp_check(COND, SKB) \ 2107 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2108 if ((COND) && !(SKB)->tstamp) \ 2109 (SKB)->tstamp = ktime_get_real(); \ 2110 } \ 2111 2112 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2113 { 2114 return __is_skb_forwardable(dev, skb, true); 2115 } 2116 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2117 2118 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2119 bool check_mtu) 2120 { 2121 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2122 2123 if (likely(!ret)) { 2124 skb->protocol = eth_type_trans(skb, 
dev); 2125 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2126 } 2127 2128 return ret; 2129 } 2130 2131 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2132 { 2133 return __dev_forward_skb2(dev, skb, true); 2134 } 2135 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2136 2137 /** 2138 * dev_forward_skb - loopback an skb to another netif 2139 * 2140 * @dev: destination network device 2141 * @skb: buffer to forward 2142 * 2143 * return values: 2144 * NET_RX_SUCCESS (no congestion) 2145 * NET_RX_DROP (packet was dropped, but freed) 2146 * 2147 * dev_forward_skb can be used for injecting an skb from the 2148 * start_xmit function of one device into the receive queue 2149 * of another device. 2150 * 2151 * The receiving device may be in another namespace, so 2152 * we have to clear all information in the skb that could 2153 * impact namespace isolation. 2154 */ 2155 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2156 { 2157 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2158 } 2159 EXPORT_SYMBOL_GPL(dev_forward_skb); 2160 2161 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2162 { 2163 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2164 } 2165 2166 static inline int deliver_skb(struct sk_buff *skb, 2167 struct packet_type *pt_prev, 2168 struct net_device *orig_dev) 2169 { 2170 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2171 return -ENOMEM; 2172 refcount_inc(&skb->users); 2173 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2174 } 2175 2176 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2177 struct packet_type **pt, 2178 struct net_device *orig_dev, 2179 __be16 type, 2180 struct list_head *ptype_list) 2181 { 2182 struct packet_type *ptype, *pt_prev = *pt; 2183 2184 list_for_each_entry_rcu(ptype, ptype_list, list) { 2185 if (ptype->type != type) 2186 continue; 2187 if (pt_prev) 2188 deliver_skb(skb, pt_prev, orig_dev); 2189 pt_prev = ptype; 2190 } 2191 *pt = pt_prev; 2192 } 2193 2194 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2195 { 2196 if (!ptype->af_packet_priv || !skb->sk) 2197 return false; 2198 2199 if (ptype->id_match) 2200 return ptype->id_match(ptype, skb->sk); 2201 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2202 return true; 2203 2204 return false; 2205 } 2206 2207 /** 2208 * dev_nit_active - return true if any network interface taps are in use 2209 * 2210 * @dev: network device to check for the presence of taps 2211 */ 2212 bool dev_nit_active(struct net_device *dev) 2213 { 2214 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2215 } 2216 EXPORT_SYMBOL_GPL(dev_nit_active); 2217 2218 /* 2219 * Support routine. Sends outgoing frames to any network 2220 * taps currently in use. 
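 *
 *	Callers are expected to gate this on dev_nit_active(), as
 *	xmit_one() further down in this file does; an out-of-tree
 *	transmit path would take the same shape (sketch only):
 *
 *		if (dev_nit_active(dev))
 *			dev_queue_xmit_nit(skb, dev);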
2221 */ 2222 2223 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2224 { 2225 struct packet_type *ptype; 2226 struct sk_buff *skb2 = NULL; 2227 struct packet_type *pt_prev = NULL; 2228 struct list_head *ptype_list = &ptype_all; 2229 2230 rcu_read_lock(); 2231 again: 2232 list_for_each_entry_rcu(ptype, ptype_list, list) { 2233 if (ptype->ignore_outgoing) 2234 continue; 2235 2236 /* Never send packets back to the socket 2237 * they originated from - MvS (miquels@drinkel.ow.org) 2238 */ 2239 if (skb_loop_sk(ptype, skb)) 2240 continue; 2241 2242 if (pt_prev) { 2243 deliver_skb(skb2, pt_prev, skb->dev); 2244 pt_prev = ptype; 2245 continue; 2246 } 2247 2248 /* need to clone skb, done only once */ 2249 skb2 = skb_clone(skb, GFP_ATOMIC); 2250 if (!skb2) 2251 goto out_unlock; 2252 2253 net_timestamp_set(skb2); 2254 2255 /* skb->nh should be correctly 2256 * set by sender, so that the second statement is 2257 * just protection against buggy protocols. 2258 */ 2259 skb_reset_mac_header(skb2); 2260 2261 if (skb_network_header(skb2) < skb2->data || 2262 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2263 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2264 ntohs(skb2->protocol), 2265 dev->name); 2266 skb_reset_network_header(skb2); 2267 } 2268 2269 skb2->transport_header = skb2->network_header; 2270 skb2->pkt_type = PACKET_OUTGOING; 2271 pt_prev = ptype; 2272 } 2273 2274 if (ptype_list == &ptype_all) { 2275 ptype_list = &dev->ptype_all; 2276 goto again; 2277 } 2278 out_unlock: 2279 if (pt_prev) { 2280 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2281 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2282 else 2283 kfree_skb(skb2); 2284 } 2285 rcu_read_unlock(); 2286 } 2287 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2288 2289 /** 2290 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2291 * @dev: Network device 2292 * @txq: number of queues available 2293 * 2294 * If real_num_tx_queues is changed the tc mappings may no longer be 2295 * valid. To resolve this verify the tc mapping remains valid and if 2296 * not NULL the mapping. With no priorities mapping to this 2297 * offset/count pair it will no longer be used. In the worst case TC0 2298 * is invalid nothing can be done so disable priority mappings. If is 2299 * expected that drivers will fix this mapping if they can before 2300 * calling netif_set_real_num_tx_queues. 2301 */ 2302 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2303 { 2304 int i; 2305 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2306 2307 /* If TC0 is invalidated disable TC mapping */ 2308 if (tc->offset + tc->count > txq) { 2309 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2310 dev->num_tc = 0; 2311 return; 2312 } 2313 2314 /* Invalidated prio to tc mappings set to TC0 */ 2315 for (i = 1; i < TC_BITMASK + 1; i++) { 2316 int q = netdev_get_prio_tc_map(dev, i); 2317 2318 tc = &dev->tc_to_txq[q]; 2319 if (tc->offset + tc->count > txq) { 2320 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2321 i, q); 2322 netdev_set_prio_tc_map(dev, i, 0); 2323 } 2324 } 2325 } 2326 2327 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2328 { 2329 if (dev->num_tc) { 2330 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2331 int i; 2332 2333 /* walk through the TCs and see if it falls into any of them */ 2334 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2335 if ((txq - tc->offset) < tc->count) 2336 return i; 2337 } 2338 2339 /* didn't find it, just return -1 to indicate no match */ 2340 return -1; 2341 } 2342 2343 return 0; 2344 } 2345 EXPORT_SYMBOL(netdev_txq_to_tc); 2346 2347 #ifdef CONFIG_XPS 2348 static struct static_key xps_needed __read_mostly; 2349 static struct static_key xps_rxqs_needed __read_mostly; 2350 static DEFINE_MUTEX(xps_map_mutex); 2351 #define xmap_dereference(P) \ 2352 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2353 2354 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2355 struct xps_dev_maps *old_maps, int tci, u16 index) 2356 { 2357 struct xps_map *map = NULL; 2358 int pos; 2359 2360 if (dev_maps) 2361 map = xmap_dereference(dev_maps->attr_map[tci]); 2362 if (!map) 2363 return false; 2364 2365 for (pos = map->len; pos--;) { 2366 if (map->queues[pos] != index) 2367 continue; 2368 2369 if (map->len > 1) { 2370 map->queues[pos] = map->queues[--map->len]; 2371 break; 2372 } 2373 2374 if (old_maps) 2375 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2376 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2377 kfree_rcu(map, rcu); 2378 return false; 2379 } 2380 2381 return true; 2382 } 2383 2384 static bool remove_xps_queue_cpu(struct net_device *dev, 2385 struct xps_dev_maps *dev_maps, 2386 int cpu, u16 offset, u16 count) 2387 { 2388 int num_tc = dev_maps->num_tc; 2389 bool active = false; 2390 int tci; 2391 2392 for (tci = cpu * num_tc; num_tc--; tci++) { 2393 int i, j; 2394 2395 for (i = count, j = offset; i--; j++) { 2396 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2397 break; 2398 } 2399 2400 active |= i < 0; 2401 } 2402 2403 return active; 2404 } 2405 2406 static void reset_xps_maps(struct net_device *dev, 2407 struct xps_dev_maps *dev_maps, 2408 enum xps_map_type type) 2409 { 2410 static_key_slow_dec_cpuslocked(&xps_needed); 2411 if (type == XPS_RXQS) 2412 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2413 2414 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2415 2416 kfree_rcu(dev_maps, rcu); 2417 } 2418 2419 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2420 u16 offset, u16 count) 2421 { 2422 struct xps_dev_maps *dev_maps; 2423 bool active = false; 2424 int i, j; 2425 2426 dev_maps = xmap_dereference(dev->xps_maps[type]); 2427 if (!dev_maps) 2428 return; 2429 2430 for (j = 0; j < dev_maps->nr_ids; j++) 2431 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2432 if (!active) 2433 reset_xps_maps(dev, dev_maps, type); 2434 2435 if (type == XPS_CPUS) { 2436 for (i = offset + (count - 1); count--; i--) 2437 netdev_queue_numa_node_write( 2438 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2439 } 2440 } 2441 2442 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2443 u16 count) 2444 { 2445 if (!static_key_false(&xps_needed)) 2446 return; 2447 2448 cpus_read_lock(); 2449 mutex_lock(&xps_map_mutex); 2450 2451 if (static_key_false(&xps_rxqs_needed)) 2452 clean_xps_maps(dev, XPS_RXQS, offset, count); 2453 2454 clean_xps_maps(dev, XPS_CPUS, offset, count); 2455 2456 mutex_unlock(&xps_map_mutex); 2457 cpus_read_unlock(); 2458 } 2459 2460 static void 
netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2461 { 2462 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2463 } 2464 2465 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2466 u16 index, bool is_rxqs_map) 2467 { 2468 struct xps_map *new_map; 2469 int alloc_len = XPS_MIN_MAP_ALLOC; 2470 int i, pos; 2471 2472 for (pos = 0; map && pos < map->len; pos++) { 2473 if (map->queues[pos] != index) 2474 continue; 2475 return map; 2476 } 2477 2478 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2479 if (map) { 2480 if (pos < map->alloc_len) 2481 return map; 2482 2483 alloc_len = map->alloc_len * 2; 2484 } 2485 2486 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2487 * map 2488 */ 2489 if (is_rxqs_map) 2490 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2491 else 2492 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2493 cpu_to_node(attr_index)); 2494 if (!new_map) 2495 return NULL; 2496 2497 for (i = 0; i < pos; i++) 2498 new_map->queues[i] = map->queues[i]; 2499 new_map->alloc_len = alloc_len; 2500 new_map->len = pos; 2501 2502 return new_map; 2503 } 2504 2505 /* Copy xps maps at a given index */ 2506 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2507 struct xps_dev_maps *new_dev_maps, int index, 2508 int tc, bool skip_tc) 2509 { 2510 int i, tci = index * dev_maps->num_tc; 2511 struct xps_map *map; 2512 2513 /* copy maps belonging to foreign traffic classes */ 2514 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2515 if (i == tc && skip_tc) 2516 continue; 2517 2518 /* fill in the new device map from the old device map */ 2519 map = xmap_dereference(dev_maps->attr_map[tci]); 2520 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2521 } 2522 } 2523 2524 /* Must be called under cpus_read_lock */ 2525 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2526 u16 index, enum xps_map_type type) 2527 { 2528 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2529 const unsigned long *online_mask = NULL; 2530 bool active = false, copy = false; 2531 int i, j, tci, numa_node_id = -2; 2532 int maps_sz, num_tc = 1, tc = 0; 2533 struct xps_map *map, *new_map; 2534 unsigned int nr_ids; 2535 2536 WARN_ON_ONCE(index >= dev->num_tx_queues); 2537 2538 if (dev->num_tc) { 2539 /* Do not allow XPS on subordinate device directly */ 2540 num_tc = dev->num_tc; 2541 if (num_tc < 0) 2542 return -EINVAL; 2543 2544 /* If queue belongs to subordinate dev use its map */ 2545 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2546 2547 tc = netdev_txq_to_tc(dev, index); 2548 if (tc < 0) 2549 return -EINVAL; 2550 } 2551 2552 mutex_lock(&xps_map_mutex); 2553 2554 dev_maps = xmap_dereference(dev->xps_maps[type]); 2555 if (type == XPS_RXQS) { 2556 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2557 nr_ids = dev->num_rx_queues; 2558 } else { 2559 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2560 if (num_possible_cpus() > 1) 2561 online_mask = cpumask_bits(cpu_online_mask); 2562 nr_ids = nr_cpu_ids; 2563 } 2564 2565 if (maps_sz < L1_CACHE_BYTES) 2566 maps_sz = L1_CACHE_BYTES; 2567 2568 /* The old dev_maps could be larger or smaller than the one we're 2569 * setting up now, as dev->num_tc or nr_ids could have been updated in 2570 * between. We could try to be smart, but let's be safe instead and only 2571 * copy foreign traffic classes if the two map sizes match. 
2572 */ 2573 if (dev_maps && 2574 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2575 copy = true; 2576 2577 /* allocate memory for queue storage */ 2578 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2579 j < nr_ids;) { 2580 if (!new_dev_maps) { 2581 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2582 if (!new_dev_maps) { 2583 mutex_unlock(&xps_map_mutex); 2584 return -ENOMEM; 2585 } 2586 2587 new_dev_maps->nr_ids = nr_ids; 2588 new_dev_maps->num_tc = num_tc; 2589 } 2590 2591 tci = j * num_tc + tc; 2592 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2593 2594 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2595 if (!map) 2596 goto error; 2597 2598 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2599 } 2600 2601 if (!new_dev_maps) 2602 goto out_no_new_maps; 2603 2604 if (!dev_maps) { 2605 /* Increment static keys at most once per type */ 2606 static_key_slow_inc_cpuslocked(&xps_needed); 2607 if (type == XPS_RXQS) 2608 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2609 } 2610 2611 for (j = 0; j < nr_ids; j++) { 2612 bool skip_tc = false; 2613 2614 tci = j * num_tc + tc; 2615 if (netif_attr_test_mask(j, mask, nr_ids) && 2616 netif_attr_test_online(j, online_mask, nr_ids)) { 2617 /* add tx-queue to CPU/rx-queue maps */ 2618 int pos = 0; 2619 2620 skip_tc = true; 2621 2622 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2623 while ((pos < map->len) && (map->queues[pos] != index)) 2624 pos++; 2625 2626 if (pos == map->len) 2627 map->queues[map->len++] = index; 2628 #ifdef CONFIG_NUMA 2629 if (type == XPS_CPUS) { 2630 if (numa_node_id == -2) 2631 numa_node_id = cpu_to_node(j); 2632 else if (numa_node_id != cpu_to_node(j)) 2633 numa_node_id = -1; 2634 } 2635 #endif 2636 } 2637 2638 if (copy) 2639 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2640 skip_tc); 2641 } 2642 2643 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2644 2645 /* Cleanup old maps */ 2646 if (!dev_maps) 2647 goto out_no_old_maps; 2648 2649 for (j = 0; j < dev_maps->nr_ids; j++) { 2650 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2651 map = xmap_dereference(dev_maps->attr_map[tci]); 2652 if (!map) 2653 continue; 2654 2655 if (copy) { 2656 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2657 if (map == new_map) 2658 continue; 2659 } 2660 2661 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2662 kfree_rcu(map, rcu); 2663 } 2664 } 2665 2666 old_dev_maps = dev_maps; 2667 2668 out_no_old_maps: 2669 dev_maps = new_dev_maps; 2670 active = true; 2671 2672 out_no_new_maps: 2673 if (type == XPS_CPUS) 2674 /* update Tx queue numa node */ 2675 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2676 (numa_node_id >= 0) ? 2677 numa_node_id : NUMA_NO_NODE); 2678 2679 if (!dev_maps) 2680 goto out_no_maps; 2681 2682 /* removes tx-queue from unused CPUs/rx-queues */ 2683 for (j = 0; j < dev_maps->nr_ids; j++) { 2684 tci = j * dev_maps->num_tc; 2685 2686 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2687 if (i == tc && 2688 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2689 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2690 continue; 2691 2692 active |= remove_xps_queue(dev_maps, 2693 copy ? 
old_dev_maps : NULL, 2694 tci, index); 2695 } 2696 } 2697 2698 if (old_dev_maps) 2699 kfree_rcu(old_dev_maps, rcu); 2700 2701 /* free map if not active */ 2702 if (!active) 2703 reset_xps_maps(dev, dev_maps, type); 2704 2705 out_no_maps: 2706 mutex_unlock(&xps_map_mutex); 2707 2708 return 0; 2709 error: 2710 /* remove any maps that we added */ 2711 for (j = 0; j < nr_ids; j++) { 2712 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2713 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2714 map = copy ? 2715 xmap_dereference(dev_maps->attr_map[tci]) : 2716 NULL; 2717 if (new_map && new_map != map) 2718 kfree(new_map); 2719 } 2720 } 2721 2722 mutex_unlock(&xps_map_mutex); 2723 2724 kfree(new_dev_maps); 2725 return -ENOMEM; 2726 } 2727 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2728 2729 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2730 u16 index) 2731 { 2732 int ret; 2733 2734 cpus_read_lock(); 2735 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2736 cpus_read_unlock(); 2737 2738 return ret; 2739 } 2740 EXPORT_SYMBOL(netif_set_xps_queue); 2741 2742 #endif 2743 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2744 { 2745 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2746 2747 /* Unbind any subordinate channels */ 2748 while (txq-- != &dev->_tx[0]) { 2749 if (txq->sb_dev) 2750 netdev_unbind_sb_channel(dev, txq->sb_dev); 2751 } 2752 } 2753 2754 void netdev_reset_tc(struct net_device *dev) 2755 { 2756 #ifdef CONFIG_XPS 2757 netif_reset_xps_queues_gt(dev, 0); 2758 #endif 2759 netdev_unbind_all_sb_channels(dev); 2760 2761 /* Reset TC configuration of device */ 2762 dev->num_tc = 0; 2763 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2764 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2765 } 2766 EXPORT_SYMBOL(netdev_reset_tc); 2767 2768 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2769 { 2770 if (tc >= dev->num_tc) 2771 return -EINVAL; 2772 2773 #ifdef CONFIG_XPS 2774 netif_reset_xps_queues(dev, offset, count); 2775 #endif 2776 dev->tc_to_txq[tc].count = count; 2777 dev->tc_to_txq[tc].offset = offset; 2778 return 0; 2779 } 2780 EXPORT_SYMBOL(netdev_set_tc_queue); 2781 2782 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2783 { 2784 if (num_tc > TC_MAX_QUEUE) 2785 return -EINVAL; 2786 2787 #ifdef CONFIG_XPS 2788 netif_reset_xps_queues_gt(dev, 0); 2789 #endif 2790 netdev_unbind_all_sb_channels(dev); 2791 2792 dev->num_tc = num_tc; 2793 return 0; 2794 } 2795 EXPORT_SYMBOL(netdev_set_num_tc); 2796 2797 void netdev_unbind_sb_channel(struct net_device *dev, 2798 struct net_device *sb_dev) 2799 { 2800 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2801 2802 #ifdef CONFIG_XPS 2803 netif_reset_xps_queues_gt(sb_dev, 0); 2804 #endif 2805 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2806 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2807 2808 while (txq-- != &dev->_tx[0]) { 2809 if (txq->sb_dev == sb_dev) 2810 txq->sb_dev = NULL; 2811 } 2812 } 2813 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2814 2815 int netdev_bind_sb_channel_queue(struct net_device *dev, 2816 struct net_device *sb_dev, 2817 u8 tc, u16 count, u16 offset) 2818 { 2819 /* Make certain the sb_dev and dev are already configured */ 2820 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2821 return -EINVAL; 2822 2823 /* We cannot hand out queues we don't have */ 2824 if ((offset + count) > dev->real_num_tx_queues) 2825 return -EINVAL; 2826 2827 /* Record the mapping */ 2828 
sb_dev->tc_to_txq[tc].count = count; 2829 sb_dev->tc_to_txq[tc].offset = offset; 2830 2831 /* Provide a way for Tx queue to find the tc_to_txq map or 2832 * XPS map for itself. 2833 */ 2834 while (count--) 2835 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2836 2837 return 0; 2838 } 2839 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2840 2841 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2842 { 2843 /* Do not use a multiqueue device to represent a subordinate channel */ 2844 if (netif_is_multiqueue(dev)) 2845 return -ENODEV; 2846 2847 /* We allow channels 1 - 32767 to be used for subordinate channels. 2848 * Channel 0 is meant to be "native" mode and used only to represent 2849 * the main root device. We allow writing 0 to reset the device back 2850 * to normal mode after being used as a subordinate channel. 2851 */ 2852 if (channel > S16_MAX) 2853 return -EINVAL; 2854 2855 dev->num_tc = -channel; 2856 2857 return 0; 2858 } 2859 EXPORT_SYMBOL(netdev_set_sb_channel); 2860 2861 /* 2862 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2863 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2864 */ 2865 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2866 { 2867 bool disabling; 2868 int rc; 2869 2870 disabling = txq < dev->real_num_tx_queues; 2871 2872 if (txq < 1 || txq > dev->num_tx_queues) 2873 return -EINVAL; 2874 2875 if (dev->reg_state == NETREG_REGISTERED || 2876 dev->reg_state == NETREG_UNREGISTERING) { 2877 ASSERT_RTNL(); 2878 2879 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2880 txq); 2881 if (rc) 2882 return rc; 2883 2884 if (dev->num_tc) 2885 netif_setup_tc(dev, txq); 2886 2887 dev_qdisc_change_real_num_tx(dev, txq); 2888 2889 dev->real_num_tx_queues = txq; 2890 2891 if (disabling) { 2892 synchronize_net(); 2893 qdisc_reset_all_tx_gt(dev, txq); 2894 #ifdef CONFIG_XPS 2895 netif_reset_xps_queues_gt(dev, txq); 2896 #endif 2897 } 2898 } else { 2899 dev->real_num_tx_queues = txq; 2900 } 2901 2902 return 0; 2903 } 2904 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2905 2906 #ifdef CONFIG_SYSFS 2907 /** 2908 * netif_set_real_num_rx_queues - set actual number of RX queues used 2909 * @dev: Network device 2910 * @rxq: Actual number of RX queues 2911 * 2912 * This must be called either with the rtnl_lock held or before 2913 * registration of the net device. Returns 0 on success, or a 2914 * negative error code. If called before registration, it always 2915 * succeeds. 2916 */ 2917 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2918 { 2919 int rc; 2920 2921 if (rxq < 1 || rxq > dev->num_rx_queues) 2922 return -EINVAL; 2923 2924 if (dev->reg_state == NETREG_REGISTERED) { 2925 ASSERT_RTNL(); 2926 2927 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2928 rxq); 2929 if (rc) 2930 return rc; 2931 } 2932 2933 dev->real_num_rx_queues = rxq; 2934 return 0; 2935 } 2936 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2937 #endif 2938 2939 /** 2940 * netif_set_real_num_queues - set actual number of RX and TX queues used 2941 * @dev: Network device 2942 * @txq: Actual number of TX queues 2943 * @rxq: Actual number of RX queues 2944 * 2945 * Set the real number of both TX and RX queues. 2946 * Does nothing if the number of queues is already correct. 
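 *
 *	Illustrative sketch of a caller (a hypothetical driver reacting to
 *	an ethtool channel-count change, under the rtnl lock):
 *
 *		err = netif_set_real_num_queues(netdev, new_tx, new_rx);
 *		if (err)
 *			return err;
 *
 *	Increases are applied first so that a failure can be unwound by
 *	decreases alone, leaving the previous counts in effect.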
2947 */ 2948 int netif_set_real_num_queues(struct net_device *dev, 2949 unsigned int txq, unsigned int rxq) 2950 { 2951 unsigned int old_rxq = dev->real_num_rx_queues; 2952 int err; 2953 2954 if (txq < 1 || txq > dev->num_tx_queues || 2955 rxq < 1 || rxq > dev->num_rx_queues) 2956 return -EINVAL; 2957 2958 /* Start from increases, so the error path only does decreases - 2959 * decreases can't fail. 2960 */ 2961 if (rxq > dev->real_num_rx_queues) { 2962 err = netif_set_real_num_rx_queues(dev, rxq); 2963 if (err) 2964 return err; 2965 } 2966 if (txq > dev->real_num_tx_queues) { 2967 err = netif_set_real_num_tx_queues(dev, txq); 2968 if (err) 2969 goto undo_rx; 2970 } 2971 if (rxq < dev->real_num_rx_queues) 2972 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 2973 if (txq < dev->real_num_tx_queues) 2974 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 2975 2976 return 0; 2977 undo_rx: 2978 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 2979 return err; 2980 } 2981 EXPORT_SYMBOL(netif_set_real_num_queues); 2982 2983 /** 2984 * netif_set_tso_max_size() - set the max size of TSO frames supported 2985 * @dev: netdev to update 2986 * @size: max skb->len of a TSO frame 2987 * 2988 * Set the limit on the size of TSO super-frames the device can handle. 2989 * Unless explicitly set the stack will assume the value of 2990 * %GSO_LEGACY_MAX_SIZE. 2991 */ 2992 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) 2993 { 2994 dev->tso_max_size = min(GSO_MAX_SIZE, size); 2995 if (size < READ_ONCE(dev->gso_max_size)) 2996 netif_set_gso_max_size(dev, size); 2997 if (size < READ_ONCE(dev->gso_ipv4_max_size)) 2998 netif_set_gso_ipv4_max_size(dev, size); 2999 } 3000 EXPORT_SYMBOL(netif_set_tso_max_size); 3001 3002 /** 3003 * netif_set_tso_max_segs() - set the max number of segs supported for TSO 3004 * @dev: netdev to update 3005 * @segs: max number of TCP segments 3006 * 3007 * Set the limit on the number of TCP segments the device can generate from 3008 * a single TSO super-frame. 3009 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. 3010 */ 3011 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) 3012 { 3013 dev->tso_max_segs = segs; 3014 if (segs < READ_ONCE(dev->gso_max_segs)) 3015 netif_set_gso_max_segs(dev, segs); 3016 } 3017 EXPORT_SYMBOL(netif_set_tso_max_segs); 3018 3019 /** 3020 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper 3021 * @to: netdev to update 3022 * @from: netdev from which to copy the limits 3023 */ 3024 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) 3025 { 3026 netif_set_tso_max_size(to, from->tso_max_size); 3027 netif_set_tso_max_segs(to, from->tso_max_segs); 3028 } 3029 EXPORT_SYMBOL(netif_inherit_tso_max); 3030 3031 /** 3032 * netif_get_num_default_rss_queues - default number of RSS queues 3033 * 3034 * Default value is the number of physical cores if there are only 1 or 2, or 3035 * divided by 2 if there are more. 3036 */ 3037 int netif_get_num_default_rss_queues(void) 3038 { 3039 cpumask_var_t cpus; 3040 int cpu, count = 0; 3041 3042 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) 3043 return 1; 3044 3045 cpumask_copy(cpus, cpu_online_mask); 3046 for_each_cpu(cpu, cpus) { 3047 ++count; 3048 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); 3049 } 3050 free_cpumask_var(cpus); 3051 3052 return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; 3053 } 3054 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3055 3056 static void __netif_reschedule(struct Qdisc *q) 3057 { 3058 struct softnet_data *sd; 3059 unsigned long flags; 3060 3061 local_irq_save(flags); 3062 sd = this_cpu_ptr(&softnet_data); 3063 q->next_sched = NULL; 3064 *sd->output_queue_tailp = q; 3065 sd->output_queue_tailp = &q->next_sched; 3066 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3067 local_irq_restore(flags); 3068 } 3069 3070 void __netif_schedule(struct Qdisc *q) 3071 { 3072 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3073 __netif_reschedule(q); 3074 } 3075 EXPORT_SYMBOL(__netif_schedule); 3076 3077 struct dev_kfree_skb_cb { 3078 enum skb_drop_reason reason; 3079 }; 3080 3081 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3082 { 3083 return (struct dev_kfree_skb_cb *)skb->cb; 3084 } 3085 3086 void netif_schedule_queue(struct netdev_queue *txq) 3087 { 3088 rcu_read_lock(); 3089 if (!netif_xmit_stopped(txq)) { 3090 struct Qdisc *q = rcu_dereference(txq->qdisc); 3091 3092 __netif_schedule(q); 3093 } 3094 rcu_read_unlock(); 3095 } 3096 EXPORT_SYMBOL(netif_schedule_queue); 3097 3098 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3099 { 3100 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3101 struct Qdisc *q; 3102 3103 rcu_read_lock(); 3104 q = rcu_dereference(dev_queue->qdisc); 3105 __netif_schedule(q); 3106 rcu_read_unlock(); 3107 } 3108 } 3109 EXPORT_SYMBOL(netif_tx_wake_queue); 3110 3111 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3112 { 3113 unsigned long flags; 3114 3115 if (unlikely(!skb)) 3116 return; 3117 3118 if (likely(refcount_read(&skb->users) == 1)) { 3119 smp_rmb(); 3120 refcount_set(&skb->users, 0); 3121 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3122 return; 3123 } 3124 get_kfree_skb_cb(skb)->reason = reason; 3125 local_irq_save(flags); 3126 skb->next = __this_cpu_read(softnet_data.completion_queue); 3127 __this_cpu_write(softnet_data.completion_queue, skb); 3128 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3129 local_irq_restore(flags); 3130 } 3131 EXPORT_SYMBOL(dev_kfree_skb_irq_reason); 3132 3133 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason) 3134 { 3135 if (in_hardirq() || irqs_disabled()) 3136 dev_kfree_skb_irq_reason(skb, reason); 3137 else 3138 kfree_skb_reason(skb, reason); 3139 } 3140 EXPORT_SYMBOL(dev_kfree_skb_any_reason); 3141 3142 3143 /** 3144 * netif_device_detach - mark device as removed 3145 * @dev: network device 3146 * 3147 * Mark device as removed from system and therefore no longer available. 3148 */ 3149 void netif_device_detach(struct net_device *dev) 3150 { 3151 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3152 netif_running(dev)) { 3153 netif_tx_stop_all_queues(dev); 3154 } 3155 } 3156 EXPORT_SYMBOL(netif_device_detach); 3157 3158 /** 3159 * netif_device_attach - mark device as attached 3160 * @dev: network device 3161 * 3162 * Mark device as attached from system and restart if needed. 3163 */ 3164 void netif_device_attach(struct net_device *dev) 3165 { 3166 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3167 netif_running(dev)) { 3168 netif_tx_wake_all_queues(dev); 3169 __netdev_watchdog_up(dev); 3170 } 3171 } 3172 EXPORT_SYMBOL(netif_device_attach); 3173 3174 /* 3175 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3176 * to be used as a distribution range. 
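 *
 * Worked example (illustrative numbers): with qoffset 8 and qcount 4,
 * a recorded rx queue of 13 becomes 13 - 8 = 5, is folded down to
 * 5 - 4 = 1, and the returned tx queue is 1 + 8 = 9.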
3177 */ 3178 static u16 skb_tx_hash(const struct net_device *dev, 3179 const struct net_device *sb_dev, 3180 struct sk_buff *skb) 3181 { 3182 u32 hash; 3183 u16 qoffset = 0; 3184 u16 qcount = dev->real_num_tx_queues; 3185 3186 if (dev->num_tc) { 3187 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3188 3189 qoffset = sb_dev->tc_to_txq[tc].offset; 3190 qcount = sb_dev->tc_to_txq[tc].count; 3191 if (unlikely(!qcount)) { 3192 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3193 sb_dev->name, qoffset, tc); 3194 qoffset = 0; 3195 qcount = dev->real_num_tx_queues; 3196 } 3197 } 3198 3199 if (skb_rx_queue_recorded(skb)) { 3200 hash = skb_get_rx_queue(skb); 3201 if (hash >= qoffset) 3202 hash -= qoffset; 3203 while (unlikely(hash >= qcount)) 3204 hash -= qcount; 3205 return hash + qoffset; 3206 } 3207 3208 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3209 } 3210 3211 static void skb_warn_bad_offload(const struct sk_buff *skb) 3212 { 3213 static const netdev_features_t null_features; 3214 struct net_device *dev = skb->dev; 3215 const char *name = ""; 3216 3217 if (!net_ratelimit()) 3218 return; 3219 3220 if (dev) { 3221 if (dev->dev.parent) 3222 name = dev_driver_string(dev->dev.parent); 3223 else 3224 name = netdev_name(dev); 3225 } 3226 skb_dump(KERN_WARNING, skb, false); 3227 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3228 name, dev ? &dev->features : &null_features, 3229 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3230 } 3231 3232 /* 3233 * Invalidate hardware checksum when packet is to be mangled, and 3234 * complete checksum manually on outgoing path. 3235 */ 3236 int skb_checksum_help(struct sk_buff *skb) 3237 { 3238 __wsum csum; 3239 int ret = 0, offset; 3240 3241 if (skb->ip_summed == CHECKSUM_COMPLETE) 3242 goto out_set_summed; 3243 3244 if (unlikely(skb_is_gso(skb))) { 3245 skb_warn_bad_offload(skb); 3246 return -EINVAL; 3247 } 3248 3249 /* Before computing a checksum, we should make sure no frag could 3250 * be modified by an external entity : checksum could be wrong. 3251 */ 3252 if (skb_has_shared_frag(skb)) { 3253 ret = __skb_linearize(skb); 3254 if (ret) 3255 goto out; 3256 } 3257 3258 offset = skb_checksum_start_offset(skb); 3259 ret = -EINVAL; 3260 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3261 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3262 goto out; 3263 } 3264 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3265 3266 offset += skb->csum_offset; 3267 if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) { 3268 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3269 goto out; 3270 } 3271 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3272 if (ret) 3273 goto out; 3274 3275 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3276 out_set_summed: 3277 skb->ip_summed = CHECKSUM_NONE; 3278 out: 3279 return ret; 3280 } 3281 EXPORT_SYMBOL(skb_checksum_help); 3282 3283 int skb_crc32c_csum_help(struct sk_buff *skb) 3284 { 3285 __le32 crc32c_csum; 3286 int ret = 0, offset, start; 3287 3288 if (skb->ip_summed != CHECKSUM_PARTIAL) 3289 goto out; 3290 3291 if (unlikely(skb_is_gso(skb))) 3292 goto out; 3293 3294 /* Before computing a checksum, we should make sure no frag could 3295 * be modified by an external entity : checksum could be wrong. 
3296 */ 3297 if (unlikely(skb_has_shared_frag(skb))) { 3298 ret = __skb_linearize(skb); 3299 if (ret) 3300 goto out; 3301 } 3302 start = skb_checksum_start_offset(skb); 3303 offset = start + offsetof(struct sctphdr, checksum); 3304 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3305 ret = -EINVAL; 3306 goto out; 3307 } 3308 3309 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3310 if (ret) 3311 goto out; 3312 3313 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3314 skb->len - start, ~(__u32)0, 3315 crc32c_csum_stub)); 3316 *(__le32 *)(skb->data + offset) = crc32c_csum; 3317 skb->ip_summed = CHECKSUM_NONE; 3318 skb->csum_not_inet = 0; 3319 out: 3320 return ret; 3321 } 3322 3323 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3324 { 3325 __be16 type = skb->protocol; 3326 3327 /* Tunnel gso handlers can set protocol to ethernet. */ 3328 if (type == htons(ETH_P_TEB)) { 3329 struct ethhdr *eth; 3330 3331 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3332 return 0; 3333 3334 eth = (struct ethhdr *)skb->data; 3335 type = eth->h_proto; 3336 } 3337 3338 return __vlan_get_protocol(skb, type, depth); 3339 } 3340 3341 /* openvswitch calls this on rx path, so we need a different check. 3342 */ 3343 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3344 { 3345 if (tx_path) 3346 return skb->ip_summed != CHECKSUM_PARTIAL && 3347 skb->ip_summed != CHECKSUM_UNNECESSARY; 3348 3349 return skb->ip_summed == CHECKSUM_NONE; 3350 } 3351 3352 /** 3353 * __skb_gso_segment - Perform segmentation on skb. 3354 * @skb: buffer to segment 3355 * @features: features for the output path (see dev->features) 3356 * @tx_path: whether it is called in TX path 3357 * 3358 * This function segments the given skb and returns a list of segments. 3359 * 3360 * It may return NULL if the skb requires no segmentation. This is 3361 * only possible when GSO is used for verifying header integrity. 3362 * 3363 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. 3364 */ 3365 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3366 netdev_features_t features, bool tx_path) 3367 { 3368 struct sk_buff *segs; 3369 3370 if (unlikely(skb_needs_check(skb, tx_path))) { 3371 int err; 3372 3373 /* We're going to init ->check field in TCP or UDP header */ 3374 err = skb_cow_head(skb, 0); 3375 if (err < 0) 3376 return ERR_PTR(err); 3377 } 3378 3379 /* Only report GSO partial support if it will enable us to 3380 * support segmentation on this frame without needing additional 3381 * work. 3382 */ 3383 if (features & NETIF_F_GSO_PARTIAL) { 3384 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3385 struct net_device *dev = skb->dev; 3386 3387 partial_features |= dev->features & dev->gso_partial_features; 3388 if (!skb_gso_ok(skb, features | partial_features)) 3389 features &= ~NETIF_F_GSO_PARTIAL; 3390 } 3391 3392 BUILD_BUG_ON(SKB_GSO_CB_OFFSET + 3393 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3394 3395 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3396 SKB_GSO_CB(skb)->encap_level = 0; 3397 3398 skb_reset_mac_header(skb); 3399 skb_reset_mac_len(skb); 3400 3401 segs = skb_mac_gso_segment(skb, features); 3402 3403 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3404 skb_warn_bad_offload(skb); 3405 3406 return segs; 3407 } 3408 EXPORT_SYMBOL(__skb_gso_segment); 3409 3410 /* Take action when hardware reception checksum errors are detected. 
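 * Typically reached, via the checksum-completion helpers, when a device
 * reported a hardware checksum that software validation later disproved;
 * DO_ONCE_LITE() below keeps the report to a single occurrence.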
*/ 3411 #ifdef CONFIG_BUG 3412 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3413 { 3414 netdev_err(dev, "hw csum failure\n"); 3415 skb_dump(KERN_ERR, skb, true); 3416 dump_stack(); 3417 } 3418 3419 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3420 { 3421 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3422 } 3423 EXPORT_SYMBOL(netdev_rx_csum_fault); 3424 #endif 3425 3426 /* XXX: check that highmem exists at all on the given machine. */ 3427 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3428 { 3429 #ifdef CONFIG_HIGHMEM 3430 int i; 3431 3432 if (!(dev->features & NETIF_F_HIGHDMA)) { 3433 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3434 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3435 3436 if (PageHighMem(skb_frag_page(frag))) 3437 return 1; 3438 } 3439 } 3440 #endif 3441 return 0; 3442 } 3443 3444 /* If MPLS offload request, verify we are testing hardware MPLS features 3445 * instead of standard features for the netdev. 3446 */ 3447 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3448 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3449 netdev_features_t features, 3450 __be16 type) 3451 { 3452 if (eth_p_mpls(type)) 3453 features &= skb->dev->mpls_features; 3454 3455 return features; 3456 } 3457 #else 3458 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3459 netdev_features_t features, 3460 __be16 type) 3461 { 3462 return features; 3463 } 3464 #endif 3465 3466 static netdev_features_t harmonize_features(struct sk_buff *skb, 3467 netdev_features_t features) 3468 { 3469 __be16 type; 3470 3471 type = skb_network_protocol(skb, NULL); 3472 features = net_mpls_features(skb, features, type); 3473 3474 if (skb->ip_summed != CHECKSUM_NONE && 3475 !can_checksum_protocol(features, type)) { 3476 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3477 } 3478 if (illegal_highdma(skb->dev, skb)) 3479 features &= ~NETIF_F_SG; 3480 3481 return features; 3482 } 3483 3484 netdev_features_t passthru_features_check(struct sk_buff *skb, 3485 struct net_device *dev, 3486 netdev_features_t features) 3487 { 3488 return features; 3489 } 3490 EXPORT_SYMBOL(passthru_features_check); 3491 3492 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3493 struct net_device *dev, 3494 netdev_features_t features) 3495 { 3496 return vlan_features_check(skb, features); 3497 } 3498 3499 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3500 struct net_device *dev, 3501 netdev_features_t features) 3502 { 3503 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3504 3505 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3506 return features & ~NETIF_F_GSO_MASK; 3507 3508 if (!skb_shinfo(skb)->gso_type) { 3509 skb_warn_bad_offload(skb); 3510 return features & ~NETIF_F_GSO_MASK; 3511 } 3512 3513 /* Support for GSO partial features requires software 3514 * intervention before we can actually process the packets 3515 * so we need to strip support for any partial features now 3516 * and we can pull them back in after we have partially 3517 * segmented the frame. 3518 */ 3519 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3520 features &= ~dev->gso_partial_features; 3521 3522 /* Make sure to clear the IPv4 ID mangling feature if the 3523 * IPv4 header has the potential to be fragmented. 3524 */ 3525 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3526 struct iphdr *iph = skb->encapsulation ? 
3527 inner_ip_hdr(skb) : ip_hdr(skb); 3528 3529 if (!(iph->frag_off & htons(IP_DF))) 3530 features &= ~NETIF_F_TSO_MANGLEID; 3531 } 3532 3533 return features; 3534 } 3535 3536 netdev_features_t netif_skb_features(struct sk_buff *skb) 3537 { 3538 struct net_device *dev = skb->dev; 3539 netdev_features_t features = dev->features; 3540 3541 if (skb_is_gso(skb)) 3542 features = gso_features_check(skb, dev, features); 3543 3544 /* If encapsulation offload request, verify we are testing 3545 * hardware encapsulation features instead of standard 3546 * features for the netdev 3547 */ 3548 if (skb->encapsulation) 3549 features &= dev->hw_enc_features; 3550 3551 if (skb_vlan_tagged(skb)) 3552 features = netdev_intersect_features(features, 3553 dev->vlan_features | 3554 NETIF_F_HW_VLAN_CTAG_TX | 3555 NETIF_F_HW_VLAN_STAG_TX); 3556 3557 if (dev->netdev_ops->ndo_features_check) 3558 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3559 features); 3560 else 3561 features &= dflt_features_check(skb, dev, features); 3562 3563 return harmonize_features(skb, features); 3564 } 3565 EXPORT_SYMBOL(netif_skb_features); 3566 3567 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3568 struct netdev_queue *txq, bool more) 3569 { 3570 unsigned int len; 3571 int rc; 3572 3573 if (dev_nit_active(dev)) 3574 dev_queue_xmit_nit(skb, dev); 3575 3576 len = skb->len; 3577 trace_net_dev_start_xmit(skb, dev); 3578 rc = netdev_start_xmit(skb, dev, txq, more); 3579 trace_net_dev_xmit(skb, rc, dev, len); 3580 3581 return rc; 3582 } 3583 3584 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3585 struct netdev_queue *txq, int *ret) 3586 { 3587 struct sk_buff *skb = first; 3588 int rc = NETDEV_TX_OK; 3589 3590 while (skb) { 3591 struct sk_buff *next = skb->next; 3592 3593 skb_mark_not_on_list(skb); 3594 rc = xmit_one(skb, dev, txq, next != NULL); 3595 if (unlikely(!dev_xmit_complete(rc))) { 3596 skb->next = next; 3597 goto out; 3598 } 3599 3600 skb = next; 3601 if (netif_tx_queue_stopped(txq) && skb) { 3602 rc = NETDEV_TX_BUSY; 3603 break; 3604 } 3605 } 3606 3607 out: 3608 *ret = rc; 3609 return skb; 3610 } 3611 3612 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3613 netdev_features_t features) 3614 { 3615 if (skb_vlan_tag_present(skb) && 3616 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3617 skb = __vlan_hwaccel_push_inside(skb); 3618 return skb; 3619 } 3620 3621 int skb_csum_hwoffload_help(struct sk_buff *skb, 3622 const netdev_features_t features) 3623 { 3624 if (unlikely(skb_csum_is_sctp(skb))) 3625 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3626 skb_crc32c_csum_help(skb); 3627 3628 if (features & NETIF_F_HW_CSUM) 3629 return 0; 3630 3631 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3632 switch (skb->csum_offset) { 3633 case offsetof(struct tcphdr, check): 3634 case offsetof(struct udphdr, check): 3635 return 0; 3636 } 3637 } 3638 3639 return skb_checksum_help(skb); 3640 } 3641 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3642 3643 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3644 { 3645 netdev_features_t features; 3646 3647 features = netif_skb_features(skb); 3648 skb = validate_xmit_vlan(skb, features); 3649 if (unlikely(!skb)) 3650 goto out_null; 3651 3652 skb = sk_validate_xmit_skb(skb, dev); 3653 if (unlikely(!skb)) 3654 goto out_null; 3655 3656 if (netif_needs_gso(skb, features)) { 3657 struct sk_buff *segs; 3658 3659 segs = skb_gso_segment(skb, features); 3660 if (IS_ERR(segs)) { 3661 goto out_kfree_skb; 3662 } else if (segs) { 3663 consume_skb(skb); 3664 skb = segs; 3665 } 3666 } else { 3667 if (skb_needs_linearize(skb, features) && 3668 __skb_linearize(skb)) 3669 goto out_kfree_skb; 3670 3671 /* If packet is not checksummed and device does not 3672 * support checksumming for this protocol, complete 3673 * checksumming here. 3674 */ 3675 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3676 if (skb->encapsulation) 3677 skb_set_inner_transport_header(skb, 3678 skb_checksum_start_offset(skb)); 3679 else 3680 skb_set_transport_header(skb, 3681 skb_checksum_start_offset(skb)); 3682 if (skb_csum_hwoffload_help(skb, features)) 3683 goto out_kfree_skb; 3684 } 3685 } 3686 3687 skb = validate_xmit_xfrm(skb, features, again); 3688 3689 return skb; 3690 3691 out_kfree_skb: 3692 kfree_skb(skb); 3693 out_null: 3694 dev_core_stats_tx_dropped_inc(dev); 3695 return NULL; 3696 } 3697 3698 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3699 { 3700 struct sk_buff *next, *head = NULL, *tail; 3701 3702 for (; skb != NULL; skb = next) { 3703 next = skb->next; 3704 skb_mark_not_on_list(skb); 3705 3706 /* in case skb wont be segmented, point to itself */ 3707 skb->prev = skb; 3708 3709 skb = validate_xmit_skb(skb, dev, again); 3710 if (!skb) 3711 continue; 3712 3713 if (!head) 3714 head = skb; 3715 else 3716 tail->next = skb; 3717 /* If skb was segmented, skb->prev points to 3718 * the last segment. If not, it still contains skb. 
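 * Either way skb->prev is the correct place to append the next frame,
 * so 'tail' always tracks the end of the list being built.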
3719 */ 3720 tail = skb->prev; 3721 } 3722 return head; 3723 } 3724 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3725 3726 static void qdisc_pkt_len_init(struct sk_buff *skb) 3727 { 3728 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3729 3730 qdisc_skb_cb(skb)->pkt_len = skb->len; 3731 3732 /* To get more precise estimation of bytes sent on wire, 3733 * we add to pkt_len the headers size of all segments 3734 */ 3735 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3736 u16 gso_segs = shinfo->gso_segs; 3737 unsigned int hdr_len; 3738 3739 /* mac layer + network layer */ 3740 hdr_len = skb_transport_offset(skb); 3741 3742 /* + transport layer */ 3743 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3744 const struct tcphdr *th; 3745 struct tcphdr _tcphdr; 3746 3747 th = skb_header_pointer(skb, hdr_len, 3748 sizeof(_tcphdr), &_tcphdr); 3749 if (likely(th)) 3750 hdr_len += __tcp_hdrlen(th); 3751 } else { 3752 struct udphdr _udphdr; 3753 3754 if (skb_header_pointer(skb, hdr_len, 3755 sizeof(_udphdr), &_udphdr)) 3756 hdr_len += sizeof(struct udphdr); 3757 } 3758 3759 if (shinfo->gso_type & SKB_GSO_DODGY) 3760 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3761 shinfo->gso_size); 3762 3763 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3764 } 3765 } 3766 3767 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3768 struct sk_buff **to_free, 3769 struct netdev_queue *txq) 3770 { 3771 int rc; 3772 3773 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3774 if (rc == NET_XMIT_SUCCESS) 3775 trace_qdisc_enqueue(q, txq, skb); 3776 return rc; 3777 } 3778 3779 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3780 struct net_device *dev, 3781 struct netdev_queue *txq) 3782 { 3783 spinlock_t *root_lock = qdisc_lock(q); 3784 struct sk_buff *to_free = NULL; 3785 bool contended; 3786 int rc; 3787 3788 qdisc_calculate_pkt_len(skb, q); 3789 3790 if (q->flags & TCQ_F_NOLOCK) { 3791 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3792 qdisc_run_begin(q)) { 3793 /* Retest nolock_qdisc_is_empty() within the protection 3794 * of q->seqlock to protect from racing with requeuing. 3795 */ 3796 if (unlikely(!nolock_qdisc_is_empty(q))) { 3797 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3798 __qdisc_run(q); 3799 qdisc_run_end(q); 3800 3801 goto no_lock_out; 3802 } 3803 3804 qdisc_bstats_cpu_update(q, skb); 3805 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3806 !nolock_qdisc_is_empty(q)) 3807 __qdisc_run(q); 3808 3809 qdisc_run_end(q); 3810 return NET_XMIT_SUCCESS; 3811 } 3812 3813 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3814 qdisc_run(q); 3815 3816 no_lock_out: 3817 if (unlikely(to_free)) 3818 kfree_skb_list_reason(to_free, 3819 SKB_DROP_REASON_QDISC_DROP); 3820 return rc; 3821 } 3822 3823 /* 3824 * Heuristic to force contended enqueues to serialize on a 3825 * separate lock before trying to get qdisc main lock. 3826 * This permits qdisc->running owner to get the lock more 3827 * often and dequeue packets faster. 3828 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 3829 * and then other tasks will only enqueue packets. The packets will be 3830 * sent after the qdisc owner is scheduled again. To prevent this 3831 * scenario the task always serialize on the lock. 
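 * Hence 'contended' is forced to true on PREEMPT_RT just below, so every
 * sender takes the busylock before the root lock.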
3832 */ 3833 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 3834 if (unlikely(contended)) 3835 spin_lock(&q->busylock); 3836 3837 spin_lock(root_lock); 3838 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3839 __qdisc_drop(skb, &to_free); 3840 rc = NET_XMIT_DROP; 3841 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3842 qdisc_run_begin(q)) { 3843 /* 3844 * This is a work-conserving queue; there are no old skbs 3845 * waiting to be sent out; and the qdisc is not running - 3846 * xmit the skb directly. 3847 */ 3848 3849 qdisc_bstats_update(q, skb); 3850 3851 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3852 if (unlikely(contended)) { 3853 spin_unlock(&q->busylock); 3854 contended = false; 3855 } 3856 __qdisc_run(q); 3857 } 3858 3859 qdisc_run_end(q); 3860 rc = NET_XMIT_SUCCESS; 3861 } else { 3862 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3863 if (qdisc_run_begin(q)) { 3864 if (unlikely(contended)) { 3865 spin_unlock(&q->busylock); 3866 contended = false; 3867 } 3868 __qdisc_run(q); 3869 qdisc_run_end(q); 3870 } 3871 } 3872 spin_unlock(root_lock); 3873 if (unlikely(to_free)) 3874 kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP); 3875 if (unlikely(contended)) 3876 spin_unlock(&q->busylock); 3877 return rc; 3878 } 3879 3880 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3881 static void skb_update_prio(struct sk_buff *skb) 3882 { 3883 const struct netprio_map *map; 3884 const struct sock *sk; 3885 unsigned int prioidx; 3886 3887 if (skb->priority) 3888 return; 3889 map = rcu_dereference_bh(skb->dev->priomap); 3890 if (!map) 3891 return; 3892 sk = skb_to_full_sk(skb); 3893 if (!sk) 3894 return; 3895 3896 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3897 3898 if (prioidx < map->priomap_len) 3899 skb->priority = map->priomap[prioidx]; 3900 } 3901 #else 3902 #define skb_update_prio(skb) 3903 #endif 3904 3905 /** 3906 * dev_loopback_xmit - loop back @skb 3907 * @net: network namespace this loopback is happening in 3908 * @sk: sk needed to be a netfilter okfn 3909 * @skb: buffer to transmit 3910 */ 3911 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3912 { 3913 skb_reset_mac_header(skb); 3914 __skb_pull(skb, skb_network_offset(skb)); 3915 skb->pkt_type = PACKET_LOOPBACK; 3916 if (skb->ip_summed == CHECKSUM_NONE) 3917 skb->ip_summed = CHECKSUM_UNNECESSARY; 3918 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb)); 3919 skb_dst_force(skb); 3920 netif_rx(skb); 3921 return 0; 3922 } 3923 EXPORT_SYMBOL(dev_loopback_xmit); 3924 3925 #ifdef CONFIG_NET_EGRESS 3926 static struct sk_buff * 3927 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3928 { 3929 #ifdef CONFIG_NET_CLS_ACT 3930 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3931 struct tcf_result cl_res; 3932 3933 if (!miniq) 3934 return skb; 3935 3936 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. 
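 * (The caller, __dev_queue_xmit(), runs qdisc_pkt_len_init() before this
 * egress hook is invoked.)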
*/ 3937 tc_skb_cb(skb)->mru = 0; 3938 tc_skb_cb(skb)->post_ct = false; 3939 mini_qdisc_bstats_cpu_update(miniq, skb); 3940 3941 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 3942 case TC_ACT_OK: 3943 case TC_ACT_RECLASSIFY: 3944 skb->tc_index = TC_H_MIN(cl_res.classid); 3945 break; 3946 case TC_ACT_SHOT: 3947 mini_qdisc_qstats_cpu_drop(miniq); 3948 *ret = NET_XMIT_DROP; 3949 kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS); 3950 return NULL; 3951 case TC_ACT_STOLEN: 3952 case TC_ACT_QUEUED: 3953 case TC_ACT_TRAP: 3954 *ret = NET_XMIT_SUCCESS; 3955 consume_skb(skb); 3956 return NULL; 3957 case TC_ACT_REDIRECT: 3958 /* No need to push/pop skb's mac_header here on egress! */ 3959 skb_do_redirect(skb); 3960 *ret = NET_XMIT_SUCCESS; 3961 return NULL; 3962 default: 3963 break; 3964 } 3965 #endif /* CONFIG_NET_CLS_ACT */ 3966 3967 return skb; 3968 } 3969 3970 static struct netdev_queue * 3971 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) 3972 { 3973 int qm = skb_get_queue_mapping(skb); 3974 3975 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 3976 } 3977 3978 static bool netdev_xmit_txqueue_skipped(void) 3979 { 3980 return __this_cpu_read(softnet_data.xmit.skip_txqueue); 3981 } 3982 3983 void netdev_xmit_skip_txqueue(bool skip) 3984 { 3985 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 3986 } 3987 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3988 #endif /* CONFIG_NET_EGRESS */ 3989 3990 #ifdef CONFIG_XPS 3991 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3992 struct xps_dev_maps *dev_maps, unsigned int tci) 3993 { 3994 int tc = netdev_get_prio_tc_map(dev, skb->priority); 3995 struct xps_map *map; 3996 int queue_index = -1; 3997 3998 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 3999 return queue_index; 4000 4001 tci *= dev_maps->num_tc; 4002 tci += tc; 4003 4004 map = rcu_dereference(dev_maps->attr_map[tci]); 4005 if (map) { 4006 if (map->len == 1) 4007 queue_index = map->queues[0]; 4008 else 4009 queue_index = map->queues[reciprocal_scale( 4010 skb_get_hash(skb), map->len)]; 4011 if (unlikely(queue_index >= dev->real_num_tx_queues)) 4012 queue_index = -1; 4013 } 4014 return queue_index; 4015 } 4016 #endif 4017 4018 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 4019 struct sk_buff *skb) 4020 { 4021 #ifdef CONFIG_XPS 4022 struct xps_dev_maps *dev_maps; 4023 struct sock *sk = skb->sk; 4024 int queue_index = -1; 4025 4026 if (!static_key_false(&xps_needed)) 4027 return -1; 4028 4029 rcu_read_lock(); 4030 if (!static_key_false(&xps_rxqs_needed)) 4031 goto get_cpus_map; 4032 4033 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4034 if (dev_maps) { 4035 int tci = sk_rx_queue_get(sk); 4036 4037 if (tci >= 0) 4038 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4039 tci); 4040 } 4041 4042 get_cpus_map: 4043 if (queue_index < 0) { 4044 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4045 if (dev_maps) { 4046 unsigned int tci = skb->sender_cpu - 1; 4047 4048 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4049 tci); 4050 } 4051 } 4052 rcu_read_unlock(); 4053 4054 return queue_index; 4055 #else 4056 return -1; 4057 #endif 4058 } 4059 4060 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4061 struct net_device *sb_dev) 4062 { 4063 return 0; 4064 } 4065 EXPORT_SYMBOL(dev_pick_tx_zero); 4066 4067 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 4068 struct net_device *sb_dev) 4069 { 4070 return 
(u16)raw_smp_processor_id() % dev->real_num_tx_queues; 4071 } 4072 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 4073 4074 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4075 struct net_device *sb_dev) 4076 { 4077 struct sock *sk = skb->sk; 4078 int queue_index = sk_tx_queue_get(sk); 4079 4080 sb_dev = sb_dev ? : dev; 4081 4082 if (queue_index < 0 || skb->ooo_okay || 4083 queue_index >= dev->real_num_tx_queues) { 4084 int new_index = get_xps_queue(dev, sb_dev, skb); 4085 4086 if (new_index < 0) 4087 new_index = skb_tx_hash(dev, sb_dev, skb); 4088 4089 if (queue_index != new_index && sk && 4090 sk_fullsock(sk) && 4091 rcu_access_pointer(sk->sk_dst_cache)) 4092 sk_tx_queue_set(sk, new_index); 4093 4094 queue_index = new_index; 4095 } 4096 4097 return queue_index; 4098 } 4099 EXPORT_SYMBOL(netdev_pick_tx); 4100 4101 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 4102 struct sk_buff *skb, 4103 struct net_device *sb_dev) 4104 { 4105 int queue_index = 0; 4106 4107 #ifdef CONFIG_XPS 4108 u32 sender_cpu = skb->sender_cpu - 1; 4109 4110 if (sender_cpu >= (u32)NR_CPUS) 4111 skb->sender_cpu = raw_smp_processor_id() + 1; 4112 #endif 4113 4114 if (dev->real_num_tx_queues != 1) { 4115 const struct net_device_ops *ops = dev->netdev_ops; 4116 4117 if (ops->ndo_select_queue) 4118 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4119 else 4120 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4121 4122 queue_index = netdev_cap_txqueue(dev, queue_index); 4123 } 4124 4125 skb_set_queue_mapping(skb, queue_index); 4126 return netdev_get_tx_queue(dev, queue_index); 4127 } 4128 4129 /** 4130 * __dev_queue_xmit() - transmit a buffer 4131 * @skb: buffer to transmit 4132 * @sb_dev: suboordinate device used for L2 forwarding offload 4133 * 4134 * Queue a buffer for transmission to a network device. The caller must 4135 * have set the device and priority and built the buffer before calling 4136 * this function. The function can be called from an interrupt. 4137 * 4138 * When calling this method, interrupts MUST be enabled. This is because 4139 * the BH enable code must have IRQs enabled so that it will not deadlock. 4140 * 4141 * Regardless of the return value, the skb is consumed, so it is currently 4142 * difficult to retry a send to this method. (You can bump the ref count 4143 * before sending to hold a reference for retry if you are careful.) 4144 * 4145 * Return: 4146 * * 0 - buffer successfully transmitted 4147 * * positive qdisc return code - NET_XMIT_DROP etc. 4148 * * negative errno - other errors 4149 */ 4150 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 4151 { 4152 struct net_device *dev = skb->dev; 4153 struct netdev_queue *txq = NULL; 4154 struct Qdisc *q; 4155 int rc = -ENOMEM; 4156 bool again = false; 4157 4158 skb_reset_mac_header(skb); 4159 skb_assert_len(skb); 4160 4161 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 4162 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); 4163 4164 /* Disable soft irqs for various locks below. Also 4165 * stops preemption for RCU. 
4166 */
4167 rcu_read_lock_bh();
4168
4169 skb_update_prio(skb);
4170
4171 qdisc_pkt_len_init(skb);
4172 #ifdef CONFIG_NET_CLS_ACT
4173 skb->tc_at_ingress = 0;
4174 #endif
4175 #ifdef CONFIG_NET_EGRESS
4176 if (static_branch_unlikely(&egress_needed_key)) {
4177 if (nf_hook_egress_active()) {
4178 skb = nf_hook_egress(skb, &rc, dev);
4179 if (!skb)
4180 goto out;
4181 }
4182
4183 netdev_xmit_skip_txqueue(false);
4184
4185 nf_skip_egress(skb, true);
4186 skb = sch_handle_egress(skb, &rc, dev);
4187 if (!skb)
4188 goto out;
4189 nf_skip_egress(skb, false);
4190
4191 if (netdev_xmit_txqueue_skipped())
4192 txq = netdev_tx_queue_mapping(dev, skb);
4193 }
4194 #endif
4195 /* If the device/qdisc doesn't need skb->dst, release it right now while
4196 * it's still hot in this CPU's cache.
4197 */
4198 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4199 skb_dst_drop(skb);
4200 else
4201 skb_dst_force(skb);
4202
4203 if (!txq)
4204 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4205
4206 q = rcu_dereference_bh(txq->qdisc);
4207
4208 trace_net_dev_queue(skb);
4209 if (q->enqueue) {
4210 rc = __dev_xmit_skb(skb, q, dev, txq);
4211 goto out;
4212 }
4213
4214 /* The device has no queue. This is the common case for software
4215 * devices: loopback, all sorts of tunnels...
4216 *
4217 * Really, it is unlikely that netif_tx_lock protection is necessary
4218 * here (e.g. loopback and IP tunnels are clean, ignoring statistics
4219 * counters). However, it is possible that some of them rely on the
4220 * protection we take here.
4221 *
4222 * Check for that and take the lock; this is not prone to deadlocks.
4223 * Alternatively, attach the noqueue qdisc to such devices, which is
4224 * even simpler 8)
4225 */
4226 if (dev->flags & IFF_UP) {
4227 int cpu = smp_processor_id(); /* ok because BHs are off */
4228
4229 /* Other cpus might concurrently change txq->xmit_lock_owner
4230 * to -1 or to their cpu id, but not to our id.
4231 */
4232 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4233 if (dev_xmit_recursion())
4234 goto recursion_alert;
4235
4236 skb = validate_xmit_skb(skb, dev, &again);
4237 if (!skb)
4238 goto out;
4239
4240 HARD_TX_LOCK(dev, txq, cpu);
4241
4242 if (!netif_xmit_stopped(txq)) {
4243 dev_xmit_recursion_inc();
4244 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4245 dev_xmit_recursion_dec();
4246 if (dev_xmit_complete(rc)) {
4247 HARD_TX_UNLOCK(dev, txq);
4248 goto out;
4249 }
4250 }
4251 HARD_TX_UNLOCK(dev, txq);
4252 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4253 dev->name);
4254 } else {
4255 /* Recursion is detected!
It is possible, 4256 * unfortunately 4257 */ 4258 recursion_alert: 4259 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4260 dev->name); 4261 } 4262 } 4263 4264 rc = -ENETDOWN; 4265 rcu_read_unlock_bh(); 4266 4267 dev_core_stats_tx_dropped_inc(dev); 4268 kfree_skb_list(skb); 4269 return rc; 4270 out: 4271 rcu_read_unlock_bh(); 4272 return rc; 4273 } 4274 EXPORT_SYMBOL(__dev_queue_xmit); 4275 4276 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4277 { 4278 struct net_device *dev = skb->dev; 4279 struct sk_buff *orig_skb = skb; 4280 struct netdev_queue *txq; 4281 int ret = NETDEV_TX_BUSY; 4282 bool again = false; 4283 4284 if (unlikely(!netif_running(dev) || 4285 !netif_carrier_ok(dev))) 4286 goto drop; 4287 4288 skb = validate_xmit_skb_list(skb, dev, &again); 4289 if (skb != orig_skb) 4290 goto drop; 4291 4292 skb_set_queue_mapping(skb, queue_id); 4293 txq = skb_get_tx_queue(dev, skb); 4294 4295 local_bh_disable(); 4296 4297 dev_xmit_recursion_inc(); 4298 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4299 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4300 ret = netdev_start_xmit(skb, dev, txq, false); 4301 HARD_TX_UNLOCK(dev, txq); 4302 dev_xmit_recursion_dec(); 4303 4304 local_bh_enable(); 4305 return ret; 4306 drop: 4307 dev_core_stats_tx_dropped_inc(dev); 4308 kfree_skb_list(skb); 4309 return NET_XMIT_DROP; 4310 } 4311 EXPORT_SYMBOL(__dev_direct_xmit); 4312 4313 /************************************************************************* 4314 * Receiver routines 4315 *************************************************************************/ 4316 4317 int netdev_max_backlog __read_mostly = 1000; 4318 EXPORT_SYMBOL(netdev_max_backlog); 4319 4320 int netdev_tstamp_prequeue __read_mostly = 1; 4321 unsigned int sysctl_skb_defer_max __read_mostly = 64; 4322 int netdev_budget __read_mostly = 300; 4323 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4324 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4325 int weight_p __read_mostly = 64; /* old backlog weight */ 4326 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4327 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4328 int dev_rx_weight __read_mostly = 64; 4329 int dev_tx_weight __read_mostly = 64; 4330 4331 /* Called with irq disabled */ 4332 static inline void ____napi_schedule(struct softnet_data *sd, 4333 struct napi_struct *napi) 4334 { 4335 struct task_struct *thread; 4336 4337 lockdep_assert_irqs_disabled(); 4338 4339 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4340 /* Paired with smp_mb__before_atomic() in 4341 * napi_enable()/dev_set_threaded(). 4342 * Use READ_ONCE() to guarantee a complete 4343 * read on napi->thread. Only call 4344 * wake_up_process() when it's not NULL. 4345 */ 4346 thread = READ_ONCE(napi->thread); 4347 if (thread) { 4348 /* Avoid doing set_bit() if the thread is in 4349 * INTERRUPTIBLE state, cause napi_thread_wait() 4350 * makes sure to proceed with napi polling 4351 * if the thread is explicitly woken from here. 4352 */ 4353 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE) 4354 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4355 wake_up_process(thread); 4356 return; 4357 } 4358 } 4359 4360 list_add_tail(&napi->poll_list, &sd->poll_list); 4361 /* If not called from net_rx_action() 4362 * we have to raise NET_RX_SOFTIRQ. 
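 * (When we are already inside net_rx_action() the softirq handler is
 * running on this CPU and will process the new poll_list entry before
 * returning, so re-raising it would be redundant.)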
4363 */ 4364 if (!sd->in_net_rx_action) 4365 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4366 } 4367 4368 #ifdef CONFIG_RPS 4369 4370 /* One global table that all flow-based protocols share. */ 4371 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4372 EXPORT_SYMBOL(rps_sock_flow_table); 4373 u32 rps_cpu_mask __read_mostly; 4374 EXPORT_SYMBOL(rps_cpu_mask); 4375 4376 struct static_key_false rps_needed __read_mostly; 4377 EXPORT_SYMBOL(rps_needed); 4378 struct static_key_false rfs_needed __read_mostly; 4379 EXPORT_SYMBOL(rfs_needed); 4380 4381 static struct rps_dev_flow * 4382 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4383 struct rps_dev_flow *rflow, u16 next_cpu) 4384 { 4385 if (next_cpu < nr_cpu_ids) { 4386 #ifdef CONFIG_RFS_ACCEL 4387 struct netdev_rx_queue *rxqueue; 4388 struct rps_dev_flow_table *flow_table; 4389 struct rps_dev_flow *old_rflow; 4390 u32 flow_id; 4391 u16 rxq_index; 4392 int rc; 4393 4394 /* Should we steer this flow to a different hardware queue? */ 4395 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4396 !(dev->features & NETIF_F_NTUPLE)) 4397 goto out; 4398 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4399 if (rxq_index == skb_get_rx_queue(skb)) 4400 goto out; 4401 4402 rxqueue = dev->_rx + rxq_index; 4403 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4404 if (!flow_table) 4405 goto out; 4406 flow_id = skb_get_hash(skb) & flow_table->mask; 4407 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4408 rxq_index, flow_id); 4409 if (rc < 0) 4410 goto out; 4411 old_rflow = rflow; 4412 rflow = &flow_table->flows[flow_id]; 4413 rflow->filter = rc; 4414 if (old_rflow->filter == rflow->filter) 4415 old_rflow->filter = RPS_NO_FILTER; 4416 out: 4417 #endif 4418 rflow->last_qtail = 4419 per_cpu(softnet_data, next_cpu).input_queue_head; 4420 } 4421 4422 rflow->cpu = next_cpu; 4423 return rflow; 4424 } 4425 4426 /* 4427 * get_rps_cpu is called from netif_receive_skb and returns the target 4428 * CPU from the RPS map of the receiving queue for a given skb. 4429 * rcu_read_lock must be held on entry. 
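 * Returns the target CPU id, or -1 if the packet should be processed on
 * the local CPU; on a flow-table hit *rflowp is updated to point at the
 * matching rps_dev_flow entry.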
4430 */ 4431 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4432 struct rps_dev_flow **rflowp) 4433 { 4434 const struct rps_sock_flow_table *sock_flow_table; 4435 struct netdev_rx_queue *rxqueue = dev->_rx; 4436 struct rps_dev_flow_table *flow_table; 4437 struct rps_map *map; 4438 int cpu = -1; 4439 u32 tcpu; 4440 u32 hash; 4441 4442 if (skb_rx_queue_recorded(skb)) { 4443 u16 index = skb_get_rx_queue(skb); 4444 4445 if (unlikely(index >= dev->real_num_rx_queues)) { 4446 WARN_ONCE(dev->real_num_rx_queues > 1, 4447 "%s received packet on queue %u, but number " 4448 "of RX queues is %u\n", 4449 dev->name, index, dev->real_num_rx_queues); 4450 goto done; 4451 } 4452 rxqueue += index; 4453 } 4454 4455 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4456 4457 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4458 map = rcu_dereference(rxqueue->rps_map); 4459 if (!flow_table && !map) 4460 goto done; 4461 4462 skb_reset_network_header(skb); 4463 hash = skb_get_hash(skb); 4464 if (!hash) 4465 goto done; 4466 4467 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4468 if (flow_table && sock_flow_table) { 4469 struct rps_dev_flow *rflow; 4470 u32 next_cpu; 4471 u32 ident; 4472 4473 /* First check into global flow table if there is a match */ 4474 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4475 if ((ident ^ hash) & ~rps_cpu_mask) 4476 goto try_rps; 4477 4478 next_cpu = ident & rps_cpu_mask; 4479 4480 /* OK, now we know there is a match, 4481 * we can look at the local (per receive queue) flow table 4482 */ 4483 rflow = &flow_table->flows[hash & flow_table->mask]; 4484 tcpu = rflow->cpu; 4485 4486 /* 4487 * If the desired CPU (where last recvmsg was done) is 4488 * different from current CPU (one in the rx-queue flow 4489 * table entry), switch if one of the following holds: 4490 * - Current CPU is unset (>= nr_cpu_ids). 4491 * - Current CPU is offline. 4492 * - The current CPU's queue tail has advanced beyond the 4493 * last packet that was enqueued using this table entry. 4494 * This guarantees that all previous packets for the flow 4495 * have been dequeued, thus preserving in order delivery. 4496 */ 4497 if (unlikely(tcpu != next_cpu) && 4498 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4499 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4500 rflow->last_qtail)) >= 0)) { 4501 tcpu = next_cpu; 4502 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4503 } 4504 4505 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4506 *rflowp = rflow; 4507 cpu = tcpu; 4508 goto done; 4509 } 4510 } 4511 4512 try_rps: 4513 4514 if (map) { 4515 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4516 if (cpu_online(tcpu)) { 4517 cpu = tcpu; 4518 goto done; 4519 } 4520 } 4521 4522 done: 4523 return cpu; 4524 } 4525 4526 #ifdef CONFIG_RFS_ACCEL 4527 4528 /** 4529 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4530 * @dev: Device on which the filter was set 4531 * @rxq_index: RX queue index 4532 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4533 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4534 * 4535 * Drivers that implement ndo_rx_flow_steer() should periodically call 4536 * this function for each installed filter and remove the filters for 4537 * which it returns %true. 
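 *
 * A minimal, hypothetical sketch of such a periodic scan (the @priv layout
 * and the my_del_hw_filter() helper are illustrative, not part of this file):
 *
 *	for (i = 0; i < priv->num_flows; i++)
 *		if (rps_may_expire_flow(priv->netdev, priv->flows[i].rxq_index,
 *					priv->flows[i].flow_id, i))
 *			my_del_hw_filter(priv, i);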
4538 */ 4539 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4540 u32 flow_id, u16 filter_id) 4541 { 4542 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4543 struct rps_dev_flow_table *flow_table; 4544 struct rps_dev_flow *rflow; 4545 bool expire = true; 4546 unsigned int cpu; 4547 4548 rcu_read_lock(); 4549 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4550 if (flow_table && flow_id <= flow_table->mask) { 4551 rflow = &flow_table->flows[flow_id]; 4552 cpu = READ_ONCE(rflow->cpu); 4553 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4554 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4555 rflow->last_qtail) < 4556 (int)(10 * flow_table->mask))) 4557 expire = false; 4558 } 4559 rcu_read_unlock(); 4560 return expire; 4561 } 4562 EXPORT_SYMBOL(rps_may_expire_flow); 4563 4564 #endif /* CONFIG_RFS_ACCEL */ 4565 4566 /* Called from hardirq (IPI) context */ 4567 static void rps_trigger_softirq(void *data) 4568 { 4569 struct softnet_data *sd = data; 4570 4571 ____napi_schedule(sd, &sd->backlog); 4572 sd->received_rps++; 4573 } 4574 4575 #endif /* CONFIG_RPS */ 4576 4577 /* Called from hardirq (IPI) context */ 4578 static void trigger_rx_softirq(void *data) 4579 { 4580 struct softnet_data *sd = data; 4581 4582 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4583 smp_store_release(&sd->defer_ipi_scheduled, 0); 4584 } 4585 4586 /* 4587 * After we queued a packet into sd->input_pkt_queue, 4588 * we need to make sure this queue is serviced soon. 4589 * 4590 * - If this is another cpu queue, link it to our rps_ipi_list, 4591 * and make sure we will process rps_ipi_list from net_rx_action(). 4592 * 4593 * - If this is our own queue, NAPI schedule our backlog. 4594 * Note that this also raises NET_RX_SOFTIRQ. 4595 */ 4596 static void napi_schedule_rps(struct softnet_data *sd) 4597 { 4598 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4599 4600 #ifdef CONFIG_RPS 4601 if (sd != mysd) { 4602 sd->rps_ipi_next = mysd->rps_ipi_list; 4603 mysd->rps_ipi_list = sd; 4604 4605 /* If not called from net_rx_action() 4606 * we have to raise NET_RX_SOFTIRQ. 4607 */ 4608 if (!mysd->in_net_rx_action) 4609 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4610 return; 4611 } 4612 #endif /* CONFIG_RPS */ 4613 __napi_schedule_irqoff(&mysd->backlog); 4614 } 4615 4616 #ifdef CONFIG_NET_FLOW_LIMIT 4617 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4618 #endif 4619 4620 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4621 { 4622 #ifdef CONFIG_NET_FLOW_LIMIT 4623 struct sd_flow_limit *fl; 4624 struct softnet_data *sd; 4625 unsigned int old_flow, new_flow; 4626 4627 if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) 4628 return false; 4629 4630 sd = this_cpu_ptr(&softnet_data); 4631 4632 rcu_read_lock(); 4633 fl = rcu_dereference(sd->flow_limit); 4634 if (fl) { 4635 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4636 old_flow = fl->history[fl->history_head]; 4637 fl->history[fl->history_head] = new_flow; 4638 4639 fl->history_head++; 4640 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4641 4642 if (likely(fl->buckets[old_flow])) 4643 fl->buckets[old_flow]--; 4644 4645 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4646 fl->count++; 4647 rcu_read_unlock(); 4648 return true; 4649 } 4650 } 4651 rcu_read_unlock(); 4652 #endif 4653 return false; 4654 } 4655 4656 /* 4657 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4658 * queue (may be a remote CPU queue). 
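 * Returns NET_RX_SUCCESS or NET_RX_DROP; callers pass either the CPU
 * selected by get_rps_cpu() or the local CPU id (see netif_rx_internal()).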
4659 */ 4660 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4661 unsigned int *qtail) 4662 { 4663 enum skb_drop_reason reason; 4664 struct softnet_data *sd; 4665 unsigned long flags; 4666 unsigned int qlen; 4667 4668 reason = SKB_DROP_REASON_NOT_SPECIFIED; 4669 sd = &per_cpu(softnet_data, cpu); 4670 4671 rps_lock_irqsave(sd, &flags); 4672 if (!netif_running(skb->dev)) 4673 goto drop; 4674 qlen = skb_queue_len(&sd->input_pkt_queue); 4675 if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { 4676 if (qlen) { 4677 enqueue: 4678 __skb_queue_tail(&sd->input_pkt_queue, skb); 4679 input_queue_tail_incr_save(sd, qtail); 4680 rps_unlock_irq_restore(sd, &flags); 4681 return NET_RX_SUCCESS; 4682 } 4683 4684 /* Schedule NAPI for backlog device 4685 * We can use non atomic operation since we own the queue lock 4686 */ 4687 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 4688 napi_schedule_rps(sd); 4689 goto enqueue; 4690 } 4691 reason = SKB_DROP_REASON_CPU_BACKLOG; 4692 4693 drop: 4694 sd->dropped++; 4695 rps_unlock_irq_restore(sd, &flags); 4696 4697 dev_core_stats_rx_dropped_inc(skb->dev); 4698 kfree_skb_reason(skb, reason); 4699 return NET_RX_DROP; 4700 } 4701 4702 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4703 { 4704 struct net_device *dev = skb->dev; 4705 struct netdev_rx_queue *rxqueue; 4706 4707 rxqueue = dev->_rx; 4708 4709 if (skb_rx_queue_recorded(skb)) { 4710 u16 index = skb_get_rx_queue(skb); 4711 4712 if (unlikely(index >= dev->real_num_rx_queues)) { 4713 WARN_ONCE(dev->real_num_rx_queues > 1, 4714 "%s received packet on queue %u, but number " 4715 "of RX queues is %u\n", 4716 dev->name, index, dev->real_num_rx_queues); 4717 4718 return rxqueue; /* Return first rxqueue */ 4719 } 4720 rxqueue += index; 4721 } 4722 return rxqueue; 4723 } 4724 4725 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4726 struct bpf_prog *xdp_prog) 4727 { 4728 void *orig_data, *orig_data_end, *hard_start; 4729 struct netdev_rx_queue *rxqueue; 4730 bool orig_bcast, orig_host; 4731 u32 mac_len, frame_sz; 4732 __be16 orig_eth_type; 4733 struct ethhdr *eth; 4734 u32 metalen, act; 4735 int off; 4736 4737 /* The XDP program wants to see the packet starting at the MAC 4738 * header. 
4739 */ 4740 mac_len = skb->data - skb_mac_header(skb); 4741 hard_start = skb->data - skb_headroom(skb); 4742 4743 /* SKB "head" area always have tailroom for skb_shared_info */ 4744 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4745 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4746 4747 rxqueue = netif_get_rxqueue(skb); 4748 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4749 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4750 skb_headlen(skb) + mac_len, true); 4751 4752 orig_data_end = xdp->data_end; 4753 orig_data = xdp->data; 4754 eth = (struct ethhdr *)xdp->data; 4755 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4756 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4757 orig_eth_type = eth->h_proto; 4758 4759 act = bpf_prog_run_xdp(xdp_prog, xdp); 4760 4761 /* check if bpf_xdp_adjust_head was used */ 4762 off = xdp->data - orig_data; 4763 if (off) { 4764 if (off > 0) 4765 __skb_pull(skb, off); 4766 else if (off < 0) 4767 __skb_push(skb, -off); 4768 4769 skb->mac_header += off; 4770 skb_reset_network_header(skb); 4771 } 4772 4773 /* check if bpf_xdp_adjust_tail was used */ 4774 off = xdp->data_end - orig_data_end; 4775 if (off != 0) { 4776 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4777 skb->len += off; /* positive on grow, negative on shrink */ 4778 } 4779 4780 /* check if XDP changed eth hdr such SKB needs update */ 4781 eth = (struct ethhdr *)xdp->data; 4782 if ((orig_eth_type != eth->h_proto) || 4783 (orig_host != ether_addr_equal_64bits(eth->h_dest, 4784 skb->dev->dev_addr)) || 4785 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4786 __skb_push(skb, ETH_HLEN); 4787 skb->pkt_type = PACKET_HOST; 4788 skb->protocol = eth_type_trans(skb, skb->dev); 4789 } 4790 4791 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4792 * before calling us again on redirect path. We do not call do_redirect 4793 * as we leave that up to the caller. 4794 * 4795 * Caller is responsible for managing lifetime of skb (i.e. calling 4796 * kfree_skb in response to actions it cannot handle/XDP_DROP). 4797 */ 4798 switch (act) { 4799 case XDP_REDIRECT: 4800 case XDP_TX: 4801 __skb_push(skb, mac_len); 4802 break; 4803 case XDP_PASS: 4804 metalen = xdp->data - xdp->data_meta; 4805 if (metalen) 4806 skb_metadata_set(skb, metalen); 4807 break; 4808 } 4809 4810 return act; 4811 } 4812 4813 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4814 struct xdp_buff *xdp, 4815 struct bpf_prog *xdp_prog) 4816 { 4817 u32 act = XDP_DROP; 4818 4819 /* Reinjected packets coming from act_mirred or similar should 4820 * not get XDP generic processing. 4821 */ 4822 if (skb_is_redirected(skb)) 4823 return XDP_PASS; 4824 4825 /* XDP packets must be linear and must have sufficient headroom 4826 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4827 * native XDP provides, thus we need to do it here as well. 4828 */ 4829 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4830 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4831 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4832 int troom = skb->tail + skb->data_len - skb->end; 4833 4834 /* In case we have to go down the path and also linearize, 4835 * then lets do the pskb_expand_head() work just once here. 4836 */ 4837 if (pskb_expand_head(skb, 4838 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4839 troom > 0 ? 
troom + 128 : 0, GFP_ATOMIC)) 4840 goto do_drop; 4841 if (skb_linearize(skb)) 4842 goto do_drop; 4843 } 4844 4845 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4846 switch (act) { 4847 case XDP_REDIRECT: 4848 case XDP_TX: 4849 case XDP_PASS: 4850 break; 4851 default: 4852 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act); 4853 fallthrough; 4854 case XDP_ABORTED: 4855 trace_xdp_exception(skb->dev, xdp_prog, act); 4856 fallthrough; 4857 case XDP_DROP: 4858 do_drop: 4859 kfree_skb(skb); 4860 break; 4861 } 4862 4863 return act; 4864 } 4865 4866 /* When doing generic XDP we have to bypass the qdisc layer and the 4867 * network taps in order to match in-driver-XDP behavior. This also means 4868 * that XDP packets are able to starve other packets going through a qdisc, 4869 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX 4870 * queues, so they do not have this starvation issue. 4871 */ 4872 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4873 { 4874 struct net_device *dev = skb->dev; 4875 struct netdev_queue *txq; 4876 bool free_skb = true; 4877 int cpu, rc; 4878 4879 txq = netdev_core_pick_tx(dev, skb, NULL); 4880 cpu = smp_processor_id(); 4881 HARD_TX_LOCK(dev, txq, cpu); 4882 if (!netif_xmit_frozen_or_drv_stopped(txq)) { 4883 rc = netdev_start_xmit(skb, dev, txq, 0); 4884 if (dev_xmit_complete(rc)) 4885 free_skb = false; 4886 } 4887 HARD_TX_UNLOCK(dev, txq); 4888 if (free_skb) { 4889 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4890 dev_core_stats_tx_dropped_inc(dev); 4891 kfree_skb(skb); 4892 } 4893 } 4894 4895 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4896 4897 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4898 { 4899 if (xdp_prog) { 4900 struct xdp_buff xdp; 4901 u32 act; 4902 int err; 4903 4904 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4905 if (act != XDP_PASS) { 4906 switch (act) { 4907 case XDP_REDIRECT: 4908 err = xdp_do_generic_redirect(skb->dev, skb, 4909 &xdp, xdp_prog); 4910 if (err) 4911 goto out_redir; 4912 break; 4913 case XDP_TX: 4914 generic_xdp_tx(skb, xdp_prog); 4915 break; 4916 } 4917 return XDP_DROP; 4918 } 4919 } 4920 return XDP_PASS; 4921 out_redir: 4922 kfree_skb_reason(skb, SKB_DROP_REASON_XDP); 4923 return XDP_DROP; 4924 } 4925 EXPORT_SYMBOL_GPL(do_xdp_generic); 4926 4927 static int netif_rx_internal(struct sk_buff *skb) 4928 { 4929 int ret; 4930 4931 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 4932 4933 trace_netif_rx(skb); 4934 4935 #ifdef CONFIG_RPS 4936 if (static_branch_unlikely(&rps_needed)) { 4937 struct rps_dev_flow voidflow, *rflow = &voidflow; 4938 int cpu; 4939 4940 rcu_read_lock(); 4941 4942 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4943 if (cpu < 0) 4944 cpu = smp_processor_id(); 4945 4946 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4947 4948 rcu_read_unlock(); 4949 } else 4950 #endif 4951 { 4952 unsigned int qtail; 4953 4954 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 4955 } 4956 return ret; 4957 } 4958 4959 /** 4960 * __netif_rx - Slightly optimized version of netif_rx 4961 * @skb: buffer to post 4962 * 4963 * This behaves as netif_rx except that it does not disable bottom halves. 4964 * As a result this function may only be invoked from the interrupt context 4965 * (either hard or soft interrupt). 
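 *
 * A minimal, hypothetical call site in a (non-NAPI) driver interrupt
 * handler; the foo_* names are placeholders, not APIs from this file:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct sk_buff *skb = foo_fetch_rx_skb(dev_id);
 *
 *		if (skb)
 *			__netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}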
4966 */
4967 int __netif_rx(struct sk_buff *skb)
4968 {
4969 int ret;
4970
4971 lockdep_assert_once(hardirq_count() | softirq_count());
4972
4973 trace_netif_rx_entry(skb);
4974 ret = netif_rx_internal(skb);
4975 trace_netif_rx_exit(ret);
4976 return ret;
4977 }
4978 EXPORT_SYMBOL(__netif_rx);
4979
4980 /**
4981 * netif_rx - post buffer to the network code
4982 * @skb: buffer to post
4983 *
4984 * This function receives a packet from a device driver and queues it for
4985 * the upper (protocol) levels to process via the backlog NAPI device. It
4986 * always succeeds. The buffer may be dropped during processing for
4987 * congestion control or by the protocol layers.
4988 * The network buffer is passed via the backlog NAPI device. Modern NIC
4989 * drivers should use NAPI and GRO instead.
4990 * This function can be used from interrupt and from process context. A
4991 * caller in process context must not disable interrupts before invoking
4992 * this function.
4993 *
4994 * Return values:
4995 * NET_RX_SUCCESS (no congestion)
4996 * NET_RX_DROP (packet was dropped)
4997 *
4998 */
4999 int netif_rx(struct sk_buff *skb)
5000 {
5001 bool need_bh_off = !(hardirq_count() | softirq_count());
5002 int ret;
5003
5004 if (need_bh_off)
5005 local_bh_disable();
5006 trace_netif_rx_entry(skb);
5007 ret = netif_rx_internal(skb);
5008 trace_netif_rx_exit(ret);
5009 if (need_bh_off)
5010 local_bh_enable();
5011 return ret;
5012 }
5013 EXPORT_SYMBOL(netif_rx);
5014
5015 static __latent_entropy void net_tx_action(struct softirq_action *h)
5016 {
5017 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5018
5019 if (sd->completion_queue) {
5020 struct sk_buff *clist;
5021
5022 local_irq_disable();
5023 clist = sd->completion_queue;
5024 sd->completion_queue = NULL;
5025 local_irq_enable();
5026
5027 while (clist) {
5028 struct sk_buff *skb = clist;
5029
5030 clist = clist->next;
5031
5032 WARN_ON(refcount_read(&skb->users));
5033 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5034 trace_consume_skb(skb, net_tx_action);
5035 else
5036 trace_kfree_skb(skb, net_tx_action,
5037 get_kfree_skb_cb(skb)->reason);
5038
5039 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5040 __kfree_skb(skb);
5041 else
5042 __kfree_skb_defer(skb);
5043 }
5044 }
5045
5046 if (sd->output_queue) {
5047 struct Qdisc *head;
5048
5049 local_irq_disable();
5050 head = sd->output_queue;
5051 sd->output_queue = NULL;
5052 sd->output_queue_tailp = &sd->output_queue;
5053 local_irq_enable();
5054
5055 rcu_read_lock();
5056
5057 while (head) {
5058 struct Qdisc *q = head;
5059 spinlock_t *root_lock = NULL;
5060
5061 head = head->next_sched;
5062
5063 /* We need to make sure head->next_sched is read
5064 * before clearing __QDISC_STATE_SCHED
5065 */
5066 smp_mb__before_atomic();
5067
5068 if (!(q->flags & TCQ_F_NOLOCK)) {
5069 root_lock = qdisc_lock(q);
5070 spin_lock(root_lock);
5071 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5072 &q->state))) {
5073 /* There is a synchronize_net() between
5074 * STATE_DEACTIVATED flag being set and
5075 * qdisc_reset()/some_qdisc_is_busy() in
5076 * dev_deactivate(), so we can safely bail out
5077 * early here to avoid data race between
5078 * qdisc_deactivate() and some_qdisc_is_busy()
5079 * for lockless qdisc.
5080 */ 5081 clear_bit(__QDISC_STATE_SCHED, &q->state); 5082 continue; 5083 } 5084 5085 clear_bit(__QDISC_STATE_SCHED, &q->state); 5086 qdisc_run(q); 5087 if (root_lock) 5088 spin_unlock(root_lock); 5089 } 5090 5091 rcu_read_unlock(); 5092 } 5093 5094 xfrm_dev_backlog(sd); 5095 } 5096 5097 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 5098 /* This hook is defined here for ATM LANE */ 5099 int (*br_fdb_test_addr_hook)(struct net_device *dev, 5100 unsigned char *addr) __read_mostly; 5101 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 5102 #endif 5103 5104 static inline struct sk_buff * 5105 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 5106 struct net_device *orig_dev, bool *another) 5107 { 5108 #ifdef CONFIG_NET_CLS_ACT 5109 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 5110 struct tcf_result cl_res; 5111 5112 /* If there's at least one ingress present somewhere (so 5113 * we get here via enabled static key), remaining devices 5114 * that are not configured with an ingress qdisc will bail 5115 * out here. 5116 */ 5117 if (!miniq) 5118 return skb; 5119 5120 if (*pt_prev) { 5121 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5122 *pt_prev = NULL; 5123 } 5124 5125 qdisc_skb_cb(skb)->pkt_len = skb->len; 5126 tc_skb_cb(skb)->mru = 0; 5127 tc_skb_cb(skb)->post_ct = false; 5128 skb->tc_at_ingress = 1; 5129 mini_qdisc_bstats_cpu_update(miniq, skb); 5130 5131 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 5132 case TC_ACT_OK: 5133 case TC_ACT_RECLASSIFY: 5134 skb->tc_index = TC_H_MIN(cl_res.classid); 5135 break; 5136 case TC_ACT_SHOT: 5137 mini_qdisc_qstats_cpu_drop(miniq); 5138 kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS); 5139 *ret = NET_RX_DROP; 5140 return NULL; 5141 case TC_ACT_STOLEN: 5142 case TC_ACT_QUEUED: 5143 case TC_ACT_TRAP: 5144 consume_skb(skb); 5145 *ret = NET_RX_SUCCESS; 5146 return NULL; 5147 case TC_ACT_REDIRECT: 5148 /* skb_mac_header check was done by cls/act_bpf, so 5149 * we can safely push the L2 header back before 5150 * redirecting to another netdev 5151 */ 5152 __skb_push(skb, skb->mac_len); 5153 if (skb_do_redirect(skb) == -EAGAIN) { 5154 __skb_pull(skb, skb->mac_len); 5155 *another = true; 5156 break; 5157 } 5158 *ret = NET_RX_SUCCESS; 5159 return NULL; 5160 case TC_ACT_CONSUMED: 5161 *ret = NET_RX_SUCCESS; 5162 return NULL; 5163 default: 5164 break; 5165 } 5166 #endif /* CONFIG_NET_CLS_ACT */ 5167 return skb; 5168 } 5169 5170 /** 5171 * netdev_is_rx_handler_busy - check if receive handler is registered 5172 * @dev: device to check 5173 * 5174 * Check if a receive handler is already registered for a given device. 5175 * Return true if there one. 5176 * 5177 * The caller must hold the rtnl_mutex. 5178 */ 5179 bool netdev_is_rx_handler_busy(struct net_device *dev) 5180 { 5181 ASSERT_RTNL(); 5182 return dev && rtnl_dereference(dev->rx_handler); 5183 } 5184 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5185 5186 /** 5187 * netdev_rx_handler_register - register receive handler 5188 * @dev: device to register a handler for 5189 * @rx_handler: receive handler to register 5190 * @rx_handler_data: data pointer that is used by rx handler 5191 * 5192 * Register a receive handler for a device. This handler will then be 5193 * called from __netif_receive_skb. A negative errno code is returned 5194 * on a failure. 5195 * 5196 * The caller must hold the rtnl_mutex. 5197 * 5198 * For a general description of rx_handler, see enum rx_handler_result. 
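 *
 * Hedged usage sketch (my_rx_handler and my_port are placeholders); as
 * stated above, the caller must already hold the rtnl lock:
 *
 *	ASSERT_RTNL();
 *	err = netdev_rx_handler_register(dev, my_rx_handler, my_port);
 *	if (err)
 *		return err;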
5199 */ 5200 int netdev_rx_handler_register(struct net_device *dev, 5201 rx_handler_func_t *rx_handler, 5202 void *rx_handler_data) 5203 { 5204 if (netdev_is_rx_handler_busy(dev)) 5205 return -EBUSY; 5206 5207 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5208 return -EINVAL; 5209 5210 /* Note: rx_handler_data must be set before rx_handler */ 5211 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5212 rcu_assign_pointer(dev->rx_handler, rx_handler); 5213 5214 return 0; 5215 } 5216 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5217 5218 /** 5219 * netdev_rx_handler_unregister - unregister receive handler 5220 * @dev: device to unregister a handler from 5221 * 5222 * Unregister a receive handler from a device. 5223 * 5224 * The caller must hold the rtnl_mutex. 5225 */ 5226 void netdev_rx_handler_unregister(struct net_device *dev) 5227 { 5228 5229 ASSERT_RTNL(); 5230 RCU_INIT_POINTER(dev->rx_handler, NULL); 5231 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5232 * section has a guarantee to see a non NULL rx_handler_data 5233 * as well. 5234 */ 5235 synchronize_net(); 5236 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5237 } 5238 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5239 5240 /* 5241 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5242 * the special handling of PFMEMALLOC skbs. 5243 */ 5244 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5245 { 5246 switch (skb->protocol) { 5247 case htons(ETH_P_ARP): 5248 case htons(ETH_P_IP): 5249 case htons(ETH_P_IPV6): 5250 case htons(ETH_P_8021Q): 5251 case htons(ETH_P_8021AD): 5252 return true; 5253 default: 5254 return false; 5255 } 5256 } 5257 5258 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5259 int *ret, struct net_device *orig_dev) 5260 { 5261 if (nf_hook_ingress_active(skb)) { 5262 int ingress_retval; 5263 5264 if (*pt_prev) { 5265 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5266 *pt_prev = NULL; 5267 } 5268 5269 rcu_read_lock(); 5270 ingress_retval = nf_hook_ingress(skb); 5271 rcu_read_unlock(); 5272 return ingress_retval; 5273 } 5274 return 0; 5275 } 5276 5277 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5278 struct packet_type **ppt_prev) 5279 { 5280 struct packet_type *ptype, *pt_prev; 5281 rx_handler_func_t *rx_handler; 5282 struct sk_buff *skb = *pskb; 5283 struct net_device *orig_dev; 5284 bool deliver_exact = false; 5285 int ret = NET_RX_DROP; 5286 __be16 type; 5287 5288 net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb); 5289 5290 trace_netif_receive_skb(skb); 5291 5292 orig_dev = skb->dev; 5293 5294 skb_reset_network_header(skb); 5295 if (!skb_transport_header_was_set(skb)) 5296 skb_reset_transport_header(skb); 5297 skb_reset_mac_len(skb); 5298 5299 pt_prev = NULL; 5300 5301 another_round: 5302 skb->skb_iif = skb->dev->ifindex; 5303 5304 __this_cpu_inc(softnet_data.processed); 5305 5306 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5307 int ret2; 5308 5309 migrate_disable(); 5310 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5311 migrate_enable(); 5312 5313 if (ret2 != XDP_PASS) { 5314 ret = NET_RX_DROP; 5315 goto out; 5316 } 5317 } 5318 5319 if (eth_type_vlan(skb->protocol)) { 5320 skb = skb_vlan_untag(skb); 5321 if (unlikely(!skb)) 5322 goto out; 5323 } 5324 5325 if (skb_skip_tc_classify(skb)) 5326 goto skip_classify; 5327 5328 if (pfmemalloc) 5329 goto skip_taps; 5330 5331 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5332 if (pt_prev) 5333 ret = 
deliver_skb(skb, pt_prev, orig_dev); 5334 pt_prev = ptype; 5335 } 5336 5337 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5338 if (pt_prev) 5339 ret = deliver_skb(skb, pt_prev, orig_dev); 5340 pt_prev = ptype; 5341 } 5342 5343 skip_taps: 5344 #ifdef CONFIG_NET_INGRESS 5345 if (static_branch_unlikely(&ingress_needed_key)) { 5346 bool another = false; 5347 5348 nf_skip_egress(skb, true); 5349 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5350 &another); 5351 if (another) 5352 goto another_round; 5353 if (!skb) 5354 goto out; 5355 5356 nf_skip_egress(skb, false); 5357 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5358 goto out; 5359 } 5360 #endif 5361 skb_reset_redirect(skb); 5362 skip_classify: 5363 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5364 goto drop; 5365 5366 if (skb_vlan_tag_present(skb)) { 5367 if (pt_prev) { 5368 ret = deliver_skb(skb, pt_prev, orig_dev); 5369 pt_prev = NULL; 5370 } 5371 if (vlan_do_receive(&skb)) 5372 goto another_round; 5373 else if (unlikely(!skb)) 5374 goto out; 5375 } 5376 5377 rx_handler = rcu_dereference(skb->dev->rx_handler); 5378 if (rx_handler) { 5379 if (pt_prev) { 5380 ret = deliver_skb(skb, pt_prev, orig_dev); 5381 pt_prev = NULL; 5382 } 5383 switch (rx_handler(&skb)) { 5384 case RX_HANDLER_CONSUMED: 5385 ret = NET_RX_SUCCESS; 5386 goto out; 5387 case RX_HANDLER_ANOTHER: 5388 goto another_round; 5389 case RX_HANDLER_EXACT: 5390 deliver_exact = true; 5391 break; 5392 case RX_HANDLER_PASS: 5393 break; 5394 default: 5395 BUG(); 5396 } 5397 } 5398 5399 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5400 check_vlan_id: 5401 if (skb_vlan_tag_get_id(skb)) { 5402 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5403 * find vlan device. 5404 */ 5405 skb->pkt_type = PACKET_OTHERHOST; 5406 } else if (eth_type_vlan(skb->protocol)) { 5407 /* Outer header is 802.1P with vlan 0, inner header is 5408 * 802.1Q or 802.1AD and vlan_do_receive() above could 5409 * not find vlan dev for vlan id 0. 5410 */ 5411 __vlan_hwaccel_clear_tag(skb); 5412 skb = skb_vlan_untag(skb); 5413 if (unlikely(!skb)) 5414 goto out; 5415 if (vlan_do_receive(&skb)) 5416 /* After stripping off 802.1P header with vlan 0 5417 * vlan dev is found for inner header. 5418 */ 5419 goto another_round; 5420 else if (unlikely(!skb)) 5421 goto out; 5422 else 5423 /* We have stripped outer 802.1P vlan 0 header. 5424 * But could not find vlan dev. 5425 * check again for vlan id to set OTHERHOST. 
5426 */ 5427 goto check_vlan_id; 5428 } 5429 /* Note: we might in the future use prio bits 5430 * and set skb->priority like in vlan_do_receive() 5431 * For the time being, just ignore Priority Code Point 5432 */ 5433 __vlan_hwaccel_clear_tag(skb); 5434 } 5435 5436 type = skb->protocol; 5437 5438 /* deliver only exact match when indicated */ 5439 if (likely(!deliver_exact)) { 5440 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5441 &ptype_base[ntohs(type) & 5442 PTYPE_HASH_MASK]); 5443 } 5444 5445 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5446 &orig_dev->ptype_specific); 5447 5448 if (unlikely(skb->dev != orig_dev)) { 5449 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5450 &skb->dev->ptype_specific); 5451 } 5452 5453 if (pt_prev) { 5454 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5455 goto drop; 5456 *ppt_prev = pt_prev; 5457 } else { 5458 drop: 5459 if (!deliver_exact) 5460 dev_core_stats_rx_dropped_inc(skb->dev); 5461 else 5462 dev_core_stats_rx_nohandler_inc(skb->dev); 5463 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); 5464 /* Jamal, now you will not able to escape explaining 5465 * me how you were going to use this. :-) 5466 */ 5467 ret = NET_RX_DROP; 5468 } 5469 5470 out: 5471 /* The invariant here is that if *ppt_prev is not NULL 5472 * then skb should also be non-NULL. 5473 * 5474 * Apparently *ppt_prev assignment above holds this invariant due to 5475 * skb dereferencing near it. 5476 */ 5477 *pskb = skb; 5478 return ret; 5479 } 5480 5481 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5482 { 5483 struct net_device *orig_dev = skb->dev; 5484 struct packet_type *pt_prev = NULL; 5485 int ret; 5486 5487 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5488 if (pt_prev) 5489 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5490 skb->dev, pt_prev, orig_dev); 5491 return ret; 5492 } 5493 5494 /** 5495 * netif_receive_skb_core - special purpose version of netif_receive_skb 5496 * @skb: buffer to process 5497 * 5498 * More direct receive version of netif_receive_skb(). It should 5499 * only be used by callers that have a need to skip RPS and Generic XDP. 5500 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5501 * 5502 * This function may only be called from softirq context and interrupts 5503 * should be enabled. 5504 * 5505 * Return values (usually ignored): 5506 * NET_RX_SUCCESS: no congestion 5507 * NET_RX_DROP: packet was dropped 5508 */ 5509 int netif_receive_skb_core(struct sk_buff *skb) 5510 { 5511 int ret; 5512 5513 rcu_read_lock(); 5514 ret = __netif_receive_skb_one_core(skb, false); 5515 rcu_read_unlock(); 5516 5517 return ret; 5518 } 5519 EXPORT_SYMBOL(netif_receive_skb_core); 5520 5521 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5522 struct packet_type *pt_prev, 5523 struct net_device *orig_dev) 5524 { 5525 struct sk_buff *skb, *next; 5526 5527 if (!pt_prev) 5528 return; 5529 if (list_empty(head)) 5530 return; 5531 if (pt_prev->list_func != NULL) 5532 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5533 ip_list_rcv, head, pt_prev, orig_dev); 5534 else 5535 list_for_each_entry_safe(skb, next, head, list) { 5536 skb_list_del_init(skb); 5537 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5538 } 5539 } 5540 5541 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5542 { 5543 /* Fast-path assumptions: 5544 * - There is no RX handler. 5545 * - Only one packet_type matches. 
5546 * If either of these fails, we will end up doing some per-packet 5547 * processing in-line, then handling the 'last ptype' for the whole 5548 * sublist. This can't cause out-of-order delivery to any single ptype, 5549 * because the 'last ptype' must be constant across the sublist, and all 5550 * other ptypes are handled per-packet. 5551 */ 5552 /* Current (common) ptype of sublist */ 5553 struct packet_type *pt_curr = NULL; 5554 /* Current (common) orig_dev of sublist */ 5555 struct net_device *od_curr = NULL; 5556 struct list_head sublist; 5557 struct sk_buff *skb, *next; 5558 5559 INIT_LIST_HEAD(&sublist); 5560 list_for_each_entry_safe(skb, next, head, list) { 5561 struct net_device *orig_dev = skb->dev; 5562 struct packet_type *pt_prev = NULL; 5563 5564 skb_list_del_init(skb); 5565 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5566 if (!pt_prev) 5567 continue; 5568 if (pt_curr != pt_prev || od_curr != orig_dev) { 5569 /* dispatch old sublist */ 5570 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5571 /* start new sublist */ 5572 INIT_LIST_HEAD(&sublist); 5573 pt_curr = pt_prev; 5574 od_curr = orig_dev; 5575 } 5576 list_add_tail(&skb->list, &sublist); 5577 } 5578 5579 /* dispatch final sublist */ 5580 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5581 } 5582 5583 static int __netif_receive_skb(struct sk_buff *skb) 5584 { 5585 int ret; 5586 5587 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5588 unsigned int noreclaim_flag; 5589 5590 /* 5591 * PFMEMALLOC skbs are special, they should 5592 * - be delivered to SOCK_MEMALLOC sockets only 5593 * - stay away from userspace 5594 * - have bounded memory usage 5595 * 5596 * Use PF_MEMALLOC as this saves us from propagating the allocation 5597 * context down to all allocation sites. 5598 */ 5599 noreclaim_flag = memalloc_noreclaim_save(); 5600 ret = __netif_receive_skb_one_core(skb, true); 5601 memalloc_noreclaim_restore(noreclaim_flag); 5602 } else 5603 ret = __netif_receive_skb_one_core(skb, false); 5604 5605 return ret; 5606 } 5607 5608 static void __netif_receive_skb_list(struct list_head *head) 5609 { 5610 unsigned long noreclaim_flag = 0; 5611 struct sk_buff *skb, *next; 5612 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5613 5614 list_for_each_entry_safe(skb, next, head, list) { 5615 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5616 struct list_head sublist; 5617 5618 /* Handle the previous sublist */ 5619 list_cut_before(&sublist, head, &skb->list); 5620 if (!list_empty(&sublist)) 5621 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5622 pfmemalloc = !pfmemalloc; 5623 /* See comments in __netif_receive_skb */ 5624 if (pfmemalloc) 5625 noreclaim_flag = memalloc_noreclaim_save(); 5626 else 5627 memalloc_noreclaim_restore(noreclaim_flag); 5628 } 5629 } 5630 /* Handle the remaining sublist */ 5631 if (!list_empty(head)) 5632 __netif_receive_skb_list_core(head, pfmemalloc); 5633 /* Restore pflags */ 5634 if (pfmemalloc) 5635 memalloc_noreclaim_restore(noreclaim_flag); 5636 } 5637 5638 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5639 { 5640 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5641 struct bpf_prog *new = xdp->prog; 5642 int ret = 0; 5643 5644 switch (xdp->command) { 5645 case XDP_SETUP_PROG: 5646 rcu_assign_pointer(dev->xdp_prog, new); 5647 if (old) 5648 bpf_prog_put(old); 5649 5650 if (old && !new) { 5651 static_branch_dec(&generic_xdp_needed_key); 5652 } else if (new && !old) { 5653 static_branch_inc(&generic_xdp_needed_key); 5654 dev_disable_lro(dev); 5655 dev_disable_gro_hw(dev); 5656 } 5657 break; 5658 5659 default: 5660 ret = -EINVAL; 5661 break; 5662 } 5663 5664 return ret; 5665 } 5666 5667 static int netif_receive_skb_internal(struct sk_buff *skb) 5668 { 5669 int ret; 5670 5671 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5672 5673 if (skb_defer_rx_timestamp(skb)) 5674 return NET_RX_SUCCESS; 5675 5676 rcu_read_lock(); 5677 #ifdef CONFIG_RPS 5678 if (static_branch_unlikely(&rps_needed)) { 5679 struct rps_dev_flow voidflow, *rflow = &voidflow; 5680 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5681 5682 if (cpu >= 0) { 5683 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5684 rcu_read_unlock(); 5685 return ret; 5686 } 5687 } 5688 #endif 5689 ret = __netif_receive_skb(skb); 5690 rcu_read_unlock(); 5691 return ret; 5692 } 5693 5694 void netif_receive_skb_list_internal(struct list_head *head) 5695 { 5696 struct sk_buff *skb, *next; 5697 struct list_head sublist; 5698 5699 INIT_LIST_HEAD(&sublist); 5700 list_for_each_entry_safe(skb, next, head, list) { 5701 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5702 skb_list_del_init(skb); 5703 if (!skb_defer_rx_timestamp(skb)) 5704 list_add_tail(&skb->list, &sublist); 5705 } 5706 list_splice_init(&sublist, head); 5707 5708 rcu_read_lock(); 5709 #ifdef CONFIG_RPS 5710 if (static_branch_unlikely(&rps_needed)) { 5711 list_for_each_entry_safe(skb, next, head, list) { 5712 struct rps_dev_flow voidflow, *rflow = &voidflow; 5713 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5714 5715 if (cpu >= 0) { 5716 /* Will be handled, remove from list */ 5717 skb_list_del_init(skb); 5718 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5719 } 5720 } 5721 } 5722 #endif 5723 __netif_receive_skb_list(head); 5724 rcu_read_unlock(); 5725 } 5726 5727 /** 5728 * netif_receive_skb - process receive buffer from network 5729 * @skb: buffer to process 5730 * 5731 * netif_receive_skb() is the main receive data processing function. 5732 * It always succeeds. The buffer may be dropped during processing 5733 * for congestion control or by the protocol layers. 5734 * 5735 * This function may only be called from softirq context and interrupts 5736 * should be enabled. 
5737 * 5738 * Return values (usually ignored): 5739 * NET_RX_SUCCESS: no congestion 5740 * NET_RX_DROP: packet was dropped 5741 */ 5742 int netif_receive_skb(struct sk_buff *skb) 5743 { 5744 int ret; 5745 5746 trace_netif_receive_skb_entry(skb); 5747 5748 ret = netif_receive_skb_internal(skb); 5749 trace_netif_receive_skb_exit(ret); 5750 5751 return ret; 5752 } 5753 EXPORT_SYMBOL(netif_receive_skb); 5754 5755 /** 5756 * netif_receive_skb_list - process many receive buffers from network 5757 * @head: list of skbs to process. 5758 * 5759 * Since return value of netif_receive_skb() is normally ignored, and 5760 * wouldn't be meaningful for a list, this function returns void. 5761 * 5762 * This function may only be called from softirq context and interrupts 5763 * should be enabled. 5764 */ 5765 void netif_receive_skb_list(struct list_head *head) 5766 { 5767 struct sk_buff *skb; 5768 5769 if (list_empty(head)) 5770 return; 5771 if (trace_netif_receive_skb_list_entry_enabled()) { 5772 list_for_each_entry(skb, head, list) 5773 trace_netif_receive_skb_list_entry(skb); 5774 } 5775 netif_receive_skb_list_internal(head); 5776 trace_netif_receive_skb_list_exit(0); 5777 } 5778 EXPORT_SYMBOL(netif_receive_skb_list); 5779 5780 static DEFINE_PER_CPU(struct work_struct, flush_works); 5781 5782 /* Network device is going away, flush any packets still pending */ 5783 static void flush_backlog(struct work_struct *work) 5784 { 5785 struct sk_buff *skb, *tmp; 5786 struct softnet_data *sd; 5787 5788 local_bh_disable(); 5789 sd = this_cpu_ptr(&softnet_data); 5790 5791 rps_lock_irq_disable(sd); 5792 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5793 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5794 __skb_unlink(skb, &sd->input_pkt_queue); 5795 dev_kfree_skb_irq(skb); 5796 input_queue_head_incr(sd); 5797 } 5798 } 5799 rps_unlock_irq_enable(sd); 5800 5801 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5802 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5803 __skb_unlink(skb, &sd->process_queue); 5804 kfree_skb(skb); 5805 input_queue_head_incr(sd); 5806 } 5807 } 5808 local_bh_enable(); 5809 } 5810 5811 static bool flush_required(int cpu) 5812 { 5813 #if IS_ENABLED(CONFIG_RPS) 5814 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5815 bool do_flush; 5816 5817 rps_lock_irq_disable(sd); 5818 5819 /* as insertion into process_queue happens with the rps lock held, 5820 * process_queue access may race only with dequeue 5821 */ 5822 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5823 !skb_queue_empty_lockless(&sd->process_queue); 5824 rps_unlock_irq_enable(sd); 5825 5826 return do_flush; 5827 #endif 5828 /* without RPS we can't safely check input_pkt_queue: during a 5829 * concurrent remote skb_queue_splice() we can detect as empty both 5830 * input_pkt_queue and process_queue even if the latter could end-up 5831 * containing a lot of packets. 
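 * In that case just report that a flush is required on every CPU.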
5832 */ 5833 return true; 5834 } 5835 5836 static void flush_all_backlogs(void) 5837 { 5838 static cpumask_t flush_cpus; 5839 unsigned int cpu; 5840 5841 /* since we are under rtnl lock protection we can use static data 5842 * for the cpumask and avoid allocating on stack the possibly 5843 * large mask 5844 */ 5845 ASSERT_RTNL(); 5846 5847 cpus_read_lock(); 5848 5849 cpumask_clear(&flush_cpus); 5850 for_each_online_cpu(cpu) { 5851 if (flush_required(cpu)) { 5852 queue_work_on(cpu, system_highpri_wq, 5853 per_cpu_ptr(&flush_works, cpu)); 5854 cpumask_set_cpu(cpu, &flush_cpus); 5855 } 5856 } 5857 5858 /* we can have in flight packet[s] on the cpus we are not flushing, 5859 * synchronize_net() in unregister_netdevice_many() will take care of 5860 * them 5861 */ 5862 for_each_cpu(cpu, &flush_cpus) 5863 flush_work(per_cpu_ptr(&flush_works, cpu)); 5864 5865 cpus_read_unlock(); 5866 } 5867 5868 static void net_rps_send_ipi(struct softnet_data *remsd) 5869 { 5870 #ifdef CONFIG_RPS 5871 while (remsd) { 5872 struct softnet_data *next = remsd->rps_ipi_next; 5873 5874 if (cpu_online(remsd->cpu)) 5875 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5876 remsd = next; 5877 } 5878 #endif 5879 } 5880 5881 /* 5882 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5883 * Note: called with local irq disabled, but exits with local irq enabled. 5884 */ 5885 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5886 { 5887 #ifdef CONFIG_RPS 5888 struct softnet_data *remsd = sd->rps_ipi_list; 5889 5890 if (remsd) { 5891 sd->rps_ipi_list = NULL; 5892 5893 local_irq_enable(); 5894 5895 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5896 net_rps_send_ipi(remsd); 5897 } else 5898 #endif 5899 local_irq_enable(); 5900 } 5901 5902 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5903 { 5904 #ifdef CONFIG_RPS 5905 return sd->rps_ipi_list != NULL; 5906 #else 5907 return false; 5908 #endif 5909 } 5910 5911 static int process_backlog(struct napi_struct *napi, int quota) 5912 { 5913 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5914 bool again = true; 5915 int work = 0; 5916 5917 /* Check if we have pending ipi, its better to send them now, 5918 * not waiting net_rx_action() end. 5919 */ 5920 if (sd_has_rps_ipi_waiting(sd)) { 5921 local_irq_disable(); 5922 net_rps_action_and_irq_enable(sd); 5923 } 5924 5925 napi->weight = READ_ONCE(dev_rx_weight); 5926 while (again) { 5927 struct sk_buff *skb; 5928 5929 while ((skb = __skb_dequeue(&sd->process_queue))) { 5930 rcu_read_lock(); 5931 __netif_receive_skb(skb); 5932 rcu_read_unlock(); 5933 input_queue_head_incr(sd); 5934 if (++work >= quota) 5935 return work; 5936 5937 } 5938 5939 rps_lock_irq_disable(sd); 5940 if (skb_queue_empty(&sd->input_pkt_queue)) { 5941 /* 5942 * Inline a custom version of __napi_complete(). 5943 * only current cpu owns and manipulates this napi, 5944 * and NAPI_STATE_SCHED is the only possible flag set 5945 * on backlog. 5946 * We can use a plain write instead of clear_bit(), 5947 * and we dont need an smp_mb() memory barrier. 5948 */ 5949 napi->state = 0; 5950 again = false; 5951 } else { 5952 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5953 &sd->process_queue); 5954 } 5955 rps_unlock_irq_enable(sd); 5956 } 5957 5958 return work; 5959 } 5960 5961 /** 5962 * __napi_schedule - schedule for receive 5963 * @n: entry to schedule 5964 * 5965 * The entry's receive function will be scheduled to run. 
5966 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 5967 */ 5968 void __napi_schedule(struct napi_struct *n) 5969 { 5970 unsigned long flags; 5971 5972 local_irq_save(flags); 5973 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5974 local_irq_restore(flags); 5975 } 5976 EXPORT_SYMBOL(__napi_schedule); 5977 5978 /** 5979 * napi_schedule_prep - check if napi can be scheduled 5980 * @n: napi context 5981 * 5982 * Test if NAPI routine is already running, and if not mark 5983 * it as running. This is used as a condition variable to 5984 * insure only one NAPI poll instance runs. We also make 5985 * sure there is no pending NAPI disable. 5986 */ 5987 bool napi_schedule_prep(struct napi_struct *n) 5988 { 5989 unsigned long new, val = READ_ONCE(n->state); 5990 5991 do { 5992 if (unlikely(val & NAPIF_STATE_DISABLE)) 5993 return false; 5994 new = val | NAPIF_STATE_SCHED; 5995 5996 /* Sets STATE_MISSED bit if STATE_SCHED was already set 5997 * This was suggested by Alexander Duyck, as compiler 5998 * emits better code than : 5999 * if (val & NAPIF_STATE_SCHED) 6000 * new |= NAPIF_STATE_MISSED; 6001 */ 6002 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6003 NAPIF_STATE_MISSED; 6004 } while (!try_cmpxchg(&n->state, &val, new)); 6005 6006 return !(val & NAPIF_STATE_SCHED); 6007 } 6008 EXPORT_SYMBOL(napi_schedule_prep); 6009 6010 /** 6011 * __napi_schedule_irqoff - schedule for receive 6012 * @n: entry to schedule 6013 * 6014 * Variant of __napi_schedule() assuming hard irqs are masked. 6015 * 6016 * On PREEMPT_RT enabled kernels this maps to __napi_schedule() 6017 * because the interrupt disabled assumption might not be true 6018 * due to force-threaded interrupts and spinlock substitution. 6019 */ 6020 void __napi_schedule_irqoff(struct napi_struct *n) 6021 { 6022 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6023 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6024 else 6025 __napi_schedule(n); 6026 } 6027 EXPORT_SYMBOL(__napi_schedule_irqoff); 6028 6029 bool napi_complete_done(struct napi_struct *n, int work_done) 6030 { 6031 unsigned long flags, val, new, timeout = 0; 6032 bool ret = true; 6033 6034 /* 6035 * 1) Don't let napi dequeue from the cpu poll list 6036 * just in case its running on a different cpu. 6037 * 2) If we are busy polling, do nothing here, we have 6038 * the guarantee we will be called later. 
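 *
 * For reference, the canonical caller is a driver poll() callback
 * (sketch only, hypothetical mydrv_* names):
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx_ring(napi, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			mydrv_unmask_rx_irq(napi);
 *		return work;
 *	}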
6039 */ 6040 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6041 NAPIF_STATE_IN_BUSY_POLL))) 6042 return false; 6043 6044 if (work_done) { 6045 if (n->gro_bitmask) 6046 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6047 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 6048 } 6049 if (n->defer_hard_irqs_count > 0) { 6050 n->defer_hard_irqs_count--; 6051 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6052 if (timeout) 6053 ret = false; 6054 } 6055 if (n->gro_bitmask) { 6056 /* When the NAPI instance uses a timeout and keeps postponing 6057 * it, we need to bound somehow the time packets are kept in 6058 * the GRO layer 6059 */ 6060 napi_gro_flush(n, !!timeout); 6061 } 6062 6063 gro_normal_list(n); 6064 6065 if (unlikely(!list_empty(&n->poll_list))) { 6066 /* If n->poll_list is not empty, we need to mask irqs */ 6067 local_irq_save(flags); 6068 list_del_init(&n->poll_list); 6069 local_irq_restore(flags); 6070 } 6071 6072 val = READ_ONCE(n->state); 6073 do { 6074 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6075 6076 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6077 NAPIF_STATE_SCHED_THREADED | 6078 NAPIF_STATE_PREFER_BUSY_POLL); 6079 6080 /* If STATE_MISSED was set, leave STATE_SCHED set, 6081 * because we will call napi->poll() one more time. 6082 * This C code was suggested by Alexander Duyck to help gcc. 6083 */ 6084 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6085 NAPIF_STATE_SCHED; 6086 } while (!try_cmpxchg(&n->state, &val, new)); 6087 6088 if (unlikely(val & NAPIF_STATE_MISSED)) { 6089 __napi_schedule(n); 6090 return false; 6091 } 6092 6093 if (timeout) 6094 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6095 HRTIMER_MODE_REL_PINNED); 6096 return ret; 6097 } 6098 EXPORT_SYMBOL(napi_complete_done); 6099 6100 /* must be called under rcu_read_lock(), as we dont take a reference */ 6101 static struct napi_struct *napi_by_id(unsigned int napi_id) 6102 { 6103 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6104 struct napi_struct *napi; 6105 6106 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6107 if (napi->napi_id == napi_id) 6108 return napi; 6109 6110 return NULL; 6111 } 6112 6113 #if defined(CONFIG_NET_RX_BUSY_POLL) 6114 6115 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6116 { 6117 if (!skip_schedule) { 6118 gro_normal_list(napi); 6119 __napi_schedule(napi); 6120 return; 6121 } 6122 6123 if (napi->gro_bitmask) { 6124 /* flush too old packets 6125 * If HZ < 1000, flush all packets. 6126 */ 6127 napi_gro_flush(napi, HZ >= 1000); 6128 } 6129 6130 gro_normal_list(napi); 6131 clear_bit(NAPI_STATE_SCHED, &napi->state); 6132 } 6133 6134 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6135 u16 budget) 6136 { 6137 bool skip_schedule = false; 6138 unsigned long timeout; 6139 int rc; 6140 6141 /* Busy polling means there is a high chance device driver hard irq 6142 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6143 * set in napi_schedule_prep(). 6144 * Since we are about to call napi->poll() once more, we can safely 6145 * clear NAPI_STATE_MISSED. 6146 * 6147 * Note: x86 could use a single "lock and ..." 
instruction 6148 * to perform these two clear_bit() 6149 */ 6150 clear_bit(NAPI_STATE_MISSED, &napi->state); 6151 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6152 6153 local_bh_disable(); 6154 6155 if (prefer_busy_poll) { 6156 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6157 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6158 if (napi->defer_hard_irqs_count && timeout) { 6159 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6160 skip_schedule = true; 6161 } 6162 } 6163 6164 /* All we really want here is to re-enable device interrupts. 6165 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6166 */ 6167 rc = napi->poll(napi, budget); 6168 /* We can't gro_normal_list() here, because napi->poll() might have 6169 * rearmed the napi (napi_complete_done()) in which case it could 6170 * already be running on another CPU. 6171 */ 6172 trace_napi_poll(napi, rc, budget); 6173 netpoll_poll_unlock(have_poll_lock); 6174 if (rc == budget) 6175 __busy_poll_stop(napi, skip_schedule); 6176 local_bh_enable(); 6177 } 6178 6179 void napi_busy_loop(unsigned int napi_id, 6180 bool (*loop_end)(void *, unsigned long), 6181 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6182 { 6183 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6184 int (*napi_poll)(struct napi_struct *napi, int budget); 6185 void *have_poll_lock = NULL; 6186 struct napi_struct *napi; 6187 6188 restart: 6189 napi_poll = NULL; 6190 6191 rcu_read_lock(); 6192 6193 napi = napi_by_id(napi_id); 6194 if (!napi) 6195 goto out; 6196 6197 preempt_disable(); 6198 for (;;) { 6199 int work = 0; 6200 6201 local_bh_disable(); 6202 if (!napi_poll) { 6203 unsigned long val = READ_ONCE(napi->state); 6204 6205 /* If multiple threads are competing for this napi, 6206 * we avoid dirtying napi->state as much as we can. 
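 * Only when none of DISABLE, SCHED or IN_BUSY_POLL is set do we try to
 * claim the napi below, setting both SCHED and IN_BUSY_POLL with a single
 * cmpxchg(); on any contention we fall through to the "count" label.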
6207 */ 6208 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6209 NAPIF_STATE_IN_BUSY_POLL)) { 6210 if (prefer_busy_poll) 6211 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6212 goto count; 6213 } 6214 if (cmpxchg(&napi->state, val, 6215 val | NAPIF_STATE_IN_BUSY_POLL | 6216 NAPIF_STATE_SCHED) != val) { 6217 if (prefer_busy_poll) 6218 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6219 goto count; 6220 } 6221 have_poll_lock = netpoll_poll_lock(napi); 6222 napi_poll = napi->poll; 6223 } 6224 work = napi_poll(napi, budget); 6225 trace_napi_poll(napi, work, budget); 6226 gro_normal_list(napi); 6227 count: 6228 if (work > 0) 6229 __NET_ADD_STATS(dev_net(napi->dev), 6230 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6231 local_bh_enable(); 6232 6233 if (!loop_end || loop_end(loop_end_arg, start_time)) 6234 break; 6235 6236 if (unlikely(need_resched())) { 6237 if (napi_poll) 6238 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6239 preempt_enable(); 6240 rcu_read_unlock(); 6241 cond_resched(); 6242 if (loop_end(loop_end_arg, start_time)) 6243 return; 6244 goto restart; 6245 } 6246 cpu_relax(); 6247 } 6248 if (napi_poll) 6249 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6250 preempt_enable(); 6251 out: 6252 rcu_read_unlock(); 6253 } 6254 EXPORT_SYMBOL(napi_busy_loop); 6255 6256 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6257 6258 static void napi_hash_add(struct napi_struct *napi) 6259 { 6260 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6261 return; 6262 6263 spin_lock(&napi_hash_lock); 6264 6265 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6266 do { 6267 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6268 napi_gen_id = MIN_NAPI_ID; 6269 } while (napi_by_id(napi_gen_id)); 6270 napi->napi_id = napi_gen_id; 6271 6272 hlist_add_head_rcu(&napi->napi_hash_node, 6273 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6274 6275 spin_unlock(&napi_hash_lock); 6276 } 6277 6278 /* Warning : caller is responsible to make sure rcu grace period 6279 * is respected before freeing memory containing @napi 6280 */ 6281 static void napi_hash_del(struct napi_struct *napi) 6282 { 6283 spin_lock(&napi_hash_lock); 6284 6285 hlist_del_init_rcu(&napi->napi_hash_node); 6286 6287 spin_unlock(&napi_hash_lock); 6288 } 6289 6290 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6291 { 6292 struct napi_struct *napi; 6293 6294 napi = container_of(timer, struct napi_struct, timer); 6295 6296 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6297 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
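 * The timer firing here is armed by napi_complete_done() (and by
 * busy_poll_stop()) when gro_flush_timeout / napi_defer_hard_irqs defer
 * re-enabling the device interrupt; on expiry we simply reschedule the napi.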
6298 */ 6299 if (!napi_disable_pending(napi) && 6300 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6301 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6302 __napi_schedule_irqoff(napi); 6303 } 6304 6305 return HRTIMER_NORESTART; 6306 } 6307 6308 static void init_gro_hash(struct napi_struct *napi) 6309 { 6310 int i; 6311 6312 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6313 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6314 napi->gro_hash[i].count = 0; 6315 } 6316 napi->gro_bitmask = 0; 6317 } 6318 6319 int dev_set_threaded(struct net_device *dev, bool threaded) 6320 { 6321 struct napi_struct *napi; 6322 int err = 0; 6323 6324 if (dev->threaded == threaded) 6325 return 0; 6326 6327 if (threaded) { 6328 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6329 if (!napi->thread) { 6330 err = napi_kthread_create(napi); 6331 if (err) { 6332 threaded = false; 6333 break; 6334 } 6335 } 6336 } 6337 } 6338 6339 dev->threaded = threaded; 6340 6341 /* Make sure kthread is created before THREADED bit 6342 * is set. 6343 */ 6344 smp_mb__before_atomic(); 6345 6346 /* Setting/unsetting threaded mode on a napi might not immediately 6347 * take effect, if the current napi instance is actively being 6348 * polled. In this case, the switch between threaded mode and 6349 * softirq mode will happen in the next round of napi_schedule(). 6350 * This should not cause hiccups/stalls to the live traffic. 6351 */ 6352 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6353 if (threaded) 6354 set_bit(NAPI_STATE_THREADED, &napi->state); 6355 else 6356 clear_bit(NAPI_STATE_THREADED, &napi->state); 6357 } 6358 6359 return err; 6360 } 6361 EXPORT_SYMBOL(dev_set_threaded); 6362 6363 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 6364 int (*poll)(struct napi_struct *, int), int weight) 6365 { 6366 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6367 return; 6368 6369 INIT_LIST_HEAD(&napi->poll_list); 6370 INIT_HLIST_NODE(&napi->napi_hash_node); 6371 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6372 napi->timer.function = napi_watchdog; 6373 init_gro_hash(napi); 6374 napi->skb = NULL; 6375 INIT_LIST_HEAD(&napi->rx_list); 6376 napi->rx_count = 0; 6377 napi->poll = poll; 6378 if (weight > NAPI_POLL_WEIGHT) 6379 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6380 weight); 6381 napi->weight = weight; 6382 napi->dev = dev; 6383 #ifdef CONFIG_NETPOLL 6384 napi->poll_owner = -1; 6385 #endif 6386 set_bit(NAPI_STATE_SCHED, &napi->state); 6387 set_bit(NAPI_STATE_NPSVC, &napi->state); 6388 list_add_rcu(&napi->dev_list, &dev->napi_list); 6389 napi_hash_add(napi); 6390 napi_get_frags_check(napi); 6391 /* Create kthread for this napi if dev->threaded is set. 6392 * Clear dev->threaded if kthread creation failed so that 6393 * threaded mode will not be enabled in napi_enable(). 
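 * (Threaded mode can also be flipped after registration through
 * dev_set_threaded() above, e.g. from the per-netdev "threaded" sysfs
 * attribute.)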
6394 */ 6395 if (dev->threaded && napi_kthread_create(napi)) 6396 dev->threaded = 0; 6397 } 6398 EXPORT_SYMBOL(netif_napi_add_weight); 6399 6400 void napi_disable(struct napi_struct *n) 6401 { 6402 unsigned long val, new; 6403 6404 might_sleep(); 6405 set_bit(NAPI_STATE_DISABLE, &n->state); 6406 6407 val = READ_ONCE(n->state); 6408 do { 6409 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 6410 usleep_range(20, 200); 6411 val = READ_ONCE(n->state); 6412 } 6413 6414 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 6415 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 6416 } while (!try_cmpxchg(&n->state, &val, new)); 6417 6418 hrtimer_cancel(&n->timer); 6419 6420 clear_bit(NAPI_STATE_DISABLE, &n->state); 6421 } 6422 EXPORT_SYMBOL(napi_disable); 6423 6424 /** 6425 * napi_enable - enable NAPI scheduling 6426 * @n: NAPI context 6427 * 6428 * Resume NAPI from being scheduled on this context. 6429 * Must be paired with napi_disable. 6430 */ 6431 void napi_enable(struct napi_struct *n) 6432 { 6433 unsigned long new, val = READ_ONCE(n->state); 6434 6435 do { 6436 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6437 6438 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6439 if (n->dev->threaded && n->thread) 6440 new |= NAPIF_STATE_THREADED; 6441 } while (!try_cmpxchg(&n->state, &val, new)); 6442 } 6443 EXPORT_SYMBOL(napi_enable); 6444 6445 static void flush_gro_hash(struct napi_struct *napi) 6446 { 6447 int i; 6448 6449 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6450 struct sk_buff *skb, *n; 6451 6452 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6453 kfree_skb(skb); 6454 napi->gro_hash[i].count = 0; 6455 } 6456 } 6457 6458 /* Must be called in process context */ 6459 void __netif_napi_del(struct napi_struct *napi) 6460 { 6461 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6462 return; 6463 6464 napi_hash_del(napi); 6465 list_del_rcu(&napi->dev_list); 6466 napi_free_frags(napi); 6467 6468 flush_gro_hash(napi); 6469 napi->gro_bitmask = 0; 6470 6471 if (napi->thread) { 6472 kthread_stop(napi->thread); 6473 napi->thread = NULL; 6474 } 6475 } 6476 EXPORT_SYMBOL(__netif_napi_del); 6477 6478 static int __napi_poll(struct napi_struct *n, bool *repoll) 6479 { 6480 int work, weight; 6481 6482 weight = n->weight; 6483 6484 /* This NAPI_STATE_SCHED test is for avoiding a race 6485 * with netpoll's poll_napi(). Only the entity which 6486 * obtains the lock and sees NAPI_STATE_SCHED set will 6487 * actually make the ->poll() call. Therefore we avoid 6488 * accidentally calling ->poll() when NAPI is not scheduled. 6489 */ 6490 work = 0; 6491 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6492 work = n->poll(n, weight); 6493 trace_napi_poll(n, work, weight); 6494 } 6495 6496 if (unlikely(work > weight)) 6497 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6498 n->poll, work, weight); 6499 6500 if (likely(work < weight)) 6501 return work; 6502 6503 /* Drivers must not modify the NAPI state if they 6504 * consume the entire weight. In such cases this code 6505 * still "owns" the NAPI instance and therefore can 6506 * move the instance around on the list at-will. 6507 */ 6508 if (unlikely(napi_disable_pending(n))) { 6509 napi_complete(n); 6510 return work; 6511 } 6512 6513 /* The NAPI context has more processing work, but busy-polling 6514 * is preferred. Exit early. 
6515 */ 6516 if (napi_prefer_busy_poll(n)) { 6517 if (napi_complete_done(n, work)) { 6518 /* If timeout is not set, we need to make sure 6519 * that the NAPI is re-scheduled. 6520 */ 6521 napi_schedule(n); 6522 } 6523 return work; 6524 } 6525 6526 if (n->gro_bitmask) { 6527 /* flush too old packets 6528 * If HZ < 1000, flush all packets. 6529 */ 6530 napi_gro_flush(n, HZ >= 1000); 6531 } 6532 6533 gro_normal_list(n); 6534 6535 /* Some drivers may have called napi_schedule 6536 * prior to exhausting their budget. 6537 */ 6538 if (unlikely(!list_empty(&n->poll_list))) { 6539 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6540 n->dev ? n->dev->name : "backlog"); 6541 return work; 6542 } 6543 6544 *repoll = true; 6545 6546 return work; 6547 } 6548 6549 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6550 { 6551 bool do_repoll = false; 6552 void *have; 6553 int work; 6554 6555 list_del_init(&n->poll_list); 6556 6557 have = netpoll_poll_lock(n); 6558 6559 work = __napi_poll(n, &do_repoll); 6560 6561 if (do_repoll) 6562 list_add_tail(&n->poll_list, repoll); 6563 6564 netpoll_poll_unlock(have); 6565 6566 return work; 6567 } 6568 6569 static int napi_thread_wait(struct napi_struct *napi) 6570 { 6571 bool woken = false; 6572 6573 set_current_state(TASK_INTERRUPTIBLE); 6574 6575 while (!kthread_should_stop()) { 6576 /* Testing SCHED_THREADED bit here to make sure the current 6577 * kthread owns this napi and could poll on this napi. 6578 * Testing SCHED bit is not enough because SCHED bit might be 6579 * set by some other busy poll thread or by napi_disable(). 6580 */ 6581 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 6582 WARN_ON(!list_empty(&napi->poll_list)); 6583 __set_current_state(TASK_RUNNING); 6584 return 0; 6585 } 6586 6587 schedule(); 6588 /* woken being true indicates this thread owns this napi. 
*/ 6589 woken = true; 6590 set_current_state(TASK_INTERRUPTIBLE); 6591 } 6592 __set_current_state(TASK_RUNNING); 6593 6594 return -1; 6595 } 6596 6597 static int napi_threaded_poll(void *data) 6598 { 6599 struct napi_struct *napi = data; 6600 void *have; 6601 6602 while (!napi_thread_wait(napi)) { 6603 for (;;) { 6604 bool repoll = false; 6605 6606 local_bh_disable(); 6607 6608 have = netpoll_poll_lock(napi); 6609 __napi_poll(napi, &repoll); 6610 netpoll_poll_unlock(have); 6611 6612 local_bh_enable(); 6613 6614 if (!repoll) 6615 break; 6616 6617 cond_resched(); 6618 } 6619 } 6620 return 0; 6621 } 6622 6623 static void skb_defer_free_flush(struct softnet_data *sd) 6624 { 6625 struct sk_buff *skb, *next; 6626 6627 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ 6628 if (!READ_ONCE(sd->defer_list)) 6629 return; 6630 6631 spin_lock_irq(&sd->defer_lock); 6632 skb = sd->defer_list; 6633 sd->defer_list = NULL; 6634 sd->defer_count = 0; 6635 spin_unlock_irq(&sd->defer_lock); 6636 6637 while (skb != NULL) { 6638 next = skb->next; 6639 napi_consume_skb(skb, 1); 6640 skb = next; 6641 } 6642 } 6643 6644 static __latent_entropy void net_rx_action(struct softirq_action *h) 6645 { 6646 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6647 unsigned long time_limit = jiffies + 6648 usecs_to_jiffies(READ_ONCE(netdev_budget_usecs)); 6649 int budget = READ_ONCE(netdev_budget); 6650 LIST_HEAD(list); 6651 LIST_HEAD(repoll); 6652 6653 start: 6654 sd->in_net_rx_action = true; 6655 local_irq_disable(); 6656 list_splice_init(&sd->poll_list, &list); 6657 local_irq_enable(); 6658 6659 for (;;) { 6660 struct napi_struct *n; 6661 6662 skb_defer_free_flush(sd); 6663 6664 if (list_empty(&list)) { 6665 if (list_empty(&repoll)) { 6666 sd->in_net_rx_action = false; 6667 barrier(); 6668 /* We need to check if ____napi_schedule() 6669 * had refilled poll_list while 6670 * sd->in_net_rx_action was true. 6671 */ 6672 if (!list_empty(&sd->poll_list)) 6673 goto start; 6674 if (!sd_has_rps_ipi_waiting(sd)) 6675 goto end; 6676 } 6677 break; 6678 } 6679 6680 n = list_first_entry(&list, struct napi_struct, poll_list); 6681 budget -= napi_poll(n, &repoll); 6682 6683 /* If softirq window is exhausted then punt. 6684 * Allow this to run for 2 jiffies since which will allow 6685 * an average latency of 1.5/HZ. 
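 * Both limits are runtime tunables: budget comes from the
 * net.core.netdev_budget sysctl and time_limit from
 * net.core.netdev_budget_usecs.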
6686 */ 6687 if (unlikely(budget <= 0 || 6688 time_after_eq(jiffies, time_limit))) { 6689 sd->time_squeeze++; 6690 break; 6691 } 6692 } 6693 6694 local_irq_disable(); 6695 6696 list_splice_tail_init(&sd->poll_list, &list); 6697 list_splice_tail(&repoll, &list); 6698 list_splice(&list, &sd->poll_list); 6699 if (!list_empty(&sd->poll_list)) 6700 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6701 else 6702 sd->in_net_rx_action = false; 6703 6704 net_rps_action_and_irq_enable(sd); 6705 end:; 6706 } 6707 6708 struct netdev_adjacent { 6709 struct net_device *dev; 6710 netdevice_tracker dev_tracker; 6711 6712 /* upper master flag, there can only be one master device per list */ 6713 bool master; 6714 6715 /* lookup ignore flag */ 6716 bool ignore; 6717 6718 /* counter for the number of times this device was added to us */ 6719 u16 ref_nr; 6720 6721 /* private field for the users */ 6722 void *private; 6723 6724 struct list_head list; 6725 struct rcu_head rcu; 6726 }; 6727 6728 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6729 struct list_head *adj_list) 6730 { 6731 struct netdev_adjacent *adj; 6732 6733 list_for_each_entry(adj, adj_list, list) { 6734 if (adj->dev == adj_dev) 6735 return adj; 6736 } 6737 return NULL; 6738 } 6739 6740 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 6741 struct netdev_nested_priv *priv) 6742 { 6743 struct net_device *dev = (struct net_device *)priv->data; 6744 6745 return upper_dev == dev; 6746 } 6747 6748 /** 6749 * netdev_has_upper_dev - Check if device is linked to an upper device 6750 * @dev: device 6751 * @upper_dev: upper device to check 6752 * 6753 * Find out if a device is linked to specified upper device and return true 6754 * in case it is. Note that this checks only immediate upper device, 6755 * not through a complete stack of devices. The caller must hold the RTNL lock. 6756 */ 6757 bool netdev_has_upper_dev(struct net_device *dev, 6758 struct net_device *upper_dev) 6759 { 6760 struct netdev_nested_priv priv = { 6761 .data = (void *)upper_dev, 6762 }; 6763 6764 ASSERT_RTNL(); 6765 6766 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6767 &priv); 6768 } 6769 EXPORT_SYMBOL(netdev_has_upper_dev); 6770 6771 /** 6772 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 6773 * @dev: device 6774 * @upper_dev: upper device to check 6775 * 6776 * Find out if a device is linked to specified upper device and return true 6777 * in case it is. Note that this checks the entire upper device chain. 6778 * The caller must hold rcu lock. 6779 */ 6780 6781 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6782 struct net_device *upper_dev) 6783 { 6784 struct netdev_nested_priv priv = { 6785 .data = (void *)upper_dev, 6786 }; 6787 6788 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6789 &priv); 6790 } 6791 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6792 6793 /** 6794 * netdev_has_any_upper_dev - Check if device is linked to some device 6795 * @dev: device 6796 * 6797 * Find out if a device is linked to an upper device and return true in case 6798 * it is. The caller must hold the RTNL lock. 
6799 */ 6800 bool netdev_has_any_upper_dev(struct net_device *dev) 6801 { 6802 ASSERT_RTNL(); 6803 6804 return !list_empty(&dev->adj_list.upper); 6805 } 6806 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6807 6808 /** 6809 * netdev_master_upper_dev_get - Get master upper device 6810 * @dev: device 6811 * 6812 * Find a master upper device and return pointer to it or NULL in case 6813 * it's not there. The caller must hold the RTNL lock. 6814 */ 6815 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6816 { 6817 struct netdev_adjacent *upper; 6818 6819 ASSERT_RTNL(); 6820 6821 if (list_empty(&dev->adj_list.upper)) 6822 return NULL; 6823 6824 upper = list_first_entry(&dev->adj_list.upper, 6825 struct netdev_adjacent, list); 6826 if (likely(upper->master)) 6827 return upper->dev; 6828 return NULL; 6829 } 6830 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6831 6832 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 6833 { 6834 struct netdev_adjacent *upper; 6835 6836 ASSERT_RTNL(); 6837 6838 if (list_empty(&dev->adj_list.upper)) 6839 return NULL; 6840 6841 upper = list_first_entry(&dev->adj_list.upper, 6842 struct netdev_adjacent, list); 6843 if (likely(upper->master) && !upper->ignore) 6844 return upper->dev; 6845 return NULL; 6846 } 6847 6848 /** 6849 * netdev_has_any_lower_dev - Check if device is linked to some device 6850 * @dev: device 6851 * 6852 * Find out if a device is linked to a lower device and return true in case 6853 * it is. The caller must hold the RTNL lock. 6854 */ 6855 static bool netdev_has_any_lower_dev(struct net_device *dev) 6856 { 6857 ASSERT_RTNL(); 6858 6859 return !list_empty(&dev->adj_list.lower); 6860 } 6861 6862 void *netdev_adjacent_get_private(struct list_head *adj_list) 6863 { 6864 struct netdev_adjacent *adj; 6865 6866 adj = list_entry(adj_list, struct netdev_adjacent, list); 6867 6868 return adj->private; 6869 } 6870 EXPORT_SYMBOL(netdev_adjacent_get_private); 6871 6872 /** 6873 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6874 * @dev: device 6875 * @iter: list_head ** of the current position 6876 * 6877 * Gets the next device from the dev's upper list, starting from iter 6878 * position. The caller must hold RCU read lock. 
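 *
 * Callers normally iterate through the netdev_for_each_upper_dev_rcu()
 * helper rather than calling this directly; a rough sketch:
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		pr_debug("upper dev: %s\n", upper->name);
 *	rcu_read_unlock();
 *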
6879 */ 6880 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6881 struct list_head **iter) 6882 { 6883 struct netdev_adjacent *upper; 6884 6885 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6886 6887 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6888 6889 if (&upper->list == &dev->adj_list.upper) 6890 return NULL; 6891 6892 *iter = &upper->list; 6893 6894 return upper->dev; 6895 } 6896 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6897 6898 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 6899 struct list_head **iter, 6900 bool *ignore) 6901 { 6902 struct netdev_adjacent *upper; 6903 6904 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 6905 6906 if (&upper->list == &dev->adj_list.upper) 6907 return NULL; 6908 6909 *iter = &upper->list; 6910 *ignore = upper->ignore; 6911 6912 return upper->dev; 6913 } 6914 6915 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6916 struct list_head **iter) 6917 { 6918 struct netdev_adjacent *upper; 6919 6920 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6921 6922 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6923 6924 if (&upper->list == &dev->adj_list.upper) 6925 return NULL; 6926 6927 *iter = &upper->list; 6928 6929 return upper->dev; 6930 } 6931 6932 static int __netdev_walk_all_upper_dev(struct net_device *dev, 6933 int (*fn)(struct net_device *dev, 6934 struct netdev_nested_priv *priv), 6935 struct netdev_nested_priv *priv) 6936 { 6937 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6938 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6939 int ret, cur = 0; 6940 bool ignore; 6941 6942 now = dev; 6943 iter = &dev->adj_list.upper; 6944 6945 while (1) { 6946 if (now != dev) { 6947 ret = fn(now, priv); 6948 if (ret) 6949 return ret; 6950 } 6951 6952 next = NULL; 6953 while (1) { 6954 udev = __netdev_next_upper_dev(now, &iter, &ignore); 6955 if (!udev) 6956 break; 6957 if (ignore) 6958 continue; 6959 6960 next = udev; 6961 niter = &udev->adj_list.upper; 6962 dev_stack[cur] = now; 6963 iter_stack[cur++] = iter; 6964 break; 6965 } 6966 6967 if (!next) { 6968 if (!cur) 6969 return 0; 6970 next = dev_stack[--cur]; 6971 niter = iter_stack[cur]; 6972 } 6973 6974 now = next; 6975 iter = niter; 6976 } 6977 6978 return 0; 6979 } 6980 6981 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6982 int (*fn)(struct net_device *dev, 6983 struct netdev_nested_priv *priv), 6984 struct netdev_nested_priv *priv) 6985 { 6986 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6987 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6988 int ret, cur = 0; 6989 6990 now = dev; 6991 iter = &dev->adj_list.upper; 6992 6993 while (1) { 6994 if (now != dev) { 6995 ret = fn(now, priv); 6996 if (ret) 6997 return ret; 6998 } 6999 7000 next = NULL; 7001 while (1) { 7002 udev = netdev_next_upper_dev_rcu(now, &iter); 7003 if (!udev) 7004 break; 7005 7006 next = udev; 7007 niter = &udev->adj_list.upper; 7008 dev_stack[cur] = now; 7009 iter_stack[cur++] = iter; 7010 break; 7011 } 7012 7013 if (!next) { 7014 if (!cur) 7015 return 0; 7016 next = dev_stack[--cur]; 7017 niter = iter_stack[cur]; 7018 } 7019 7020 now = next; 7021 iter = niter; 7022 } 7023 7024 return 0; 7025 } 7026 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7027 7028 static bool __netdev_has_upper_dev(struct net_device *dev, 7029 struct net_device *upper_dev) 7030 { 7031 struct 
netdev_nested_priv priv = { 7032 .flags = 0, 7033 .data = (void *)upper_dev, 7034 }; 7035 7036 ASSERT_RTNL(); 7037 7038 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 7039 &priv); 7040 } 7041 7042 /** 7043 * netdev_lower_get_next_private - Get the next ->private from the 7044 * lower neighbour list 7045 * @dev: device 7046 * @iter: list_head ** of the current position 7047 * 7048 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7049 * list, starting from iter position. The caller must hold either hold the 7050 * RTNL lock or its own locking that guarantees that the neighbour lower 7051 * list will remain unchanged. 7052 */ 7053 void *netdev_lower_get_next_private(struct net_device *dev, 7054 struct list_head **iter) 7055 { 7056 struct netdev_adjacent *lower; 7057 7058 lower = list_entry(*iter, struct netdev_adjacent, list); 7059 7060 if (&lower->list == &dev->adj_list.lower) 7061 return NULL; 7062 7063 *iter = lower->list.next; 7064 7065 return lower->private; 7066 } 7067 EXPORT_SYMBOL(netdev_lower_get_next_private); 7068 7069 /** 7070 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7071 * lower neighbour list, RCU 7072 * variant 7073 * @dev: device 7074 * @iter: list_head ** of the current position 7075 * 7076 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7077 * list, starting from iter position. The caller must hold RCU read lock. 7078 */ 7079 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7080 struct list_head **iter) 7081 { 7082 struct netdev_adjacent *lower; 7083 7084 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 7085 7086 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7087 7088 if (&lower->list == &dev->adj_list.lower) 7089 return NULL; 7090 7091 *iter = &lower->list; 7092 7093 return lower->private; 7094 } 7095 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7096 7097 /** 7098 * netdev_lower_get_next - Get the next device from the lower neighbour 7099 * list 7100 * @dev: device 7101 * @iter: list_head ** of the current position 7102 * 7103 * Gets the next netdev_adjacent from the dev's lower neighbour 7104 * list, starting from iter position. The caller must hold RTNL lock or 7105 * its own locking that guarantees that the neighbour lower 7106 * list will remain unchanged. 
7107 */ 7108 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7109 { 7110 struct netdev_adjacent *lower; 7111 7112 lower = list_entry(*iter, struct netdev_adjacent, list); 7113 7114 if (&lower->list == &dev->adj_list.lower) 7115 return NULL; 7116 7117 *iter = lower->list.next; 7118 7119 return lower->dev; 7120 } 7121 EXPORT_SYMBOL(netdev_lower_get_next); 7122 7123 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7124 struct list_head **iter) 7125 { 7126 struct netdev_adjacent *lower; 7127 7128 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7129 7130 if (&lower->list == &dev->adj_list.lower) 7131 return NULL; 7132 7133 *iter = &lower->list; 7134 7135 return lower->dev; 7136 } 7137 7138 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7139 struct list_head **iter, 7140 bool *ignore) 7141 { 7142 struct netdev_adjacent *lower; 7143 7144 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7145 7146 if (&lower->list == &dev->adj_list.lower) 7147 return NULL; 7148 7149 *iter = &lower->list; 7150 *ignore = lower->ignore; 7151 7152 return lower->dev; 7153 } 7154 7155 int netdev_walk_all_lower_dev(struct net_device *dev, 7156 int (*fn)(struct net_device *dev, 7157 struct netdev_nested_priv *priv), 7158 struct netdev_nested_priv *priv) 7159 { 7160 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7161 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7162 int ret, cur = 0; 7163 7164 now = dev; 7165 iter = &dev->adj_list.lower; 7166 7167 while (1) { 7168 if (now != dev) { 7169 ret = fn(now, priv); 7170 if (ret) 7171 return ret; 7172 } 7173 7174 next = NULL; 7175 while (1) { 7176 ldev = netdev_next_lower_dev(now, &iter); 7177 if (!ldev) 7178 break; 7179 7180 next = ldev; 7181 niter = &ldev->adj_list.lower; 7182 dev_stack[cur] = now; 7183 iter_stack[cur++] = iter; 7184 break; 7185 } 7186 7187 if (!next) { 7188 if (!cur) 7189 return 0; 7190 next = dev_stack[--cur]; 7191 niter = iter_stack[cur]; 7192 } 7193 7194 now = next; 7195 iter = niter; 7196 } 7197 7198 return 0; 7199 } 7200 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7201 7202 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7203 int (*fn)(struct net_device *dev, 7204 struct netdev_nested_priv *priv), 7205 struct netdev_nested_priv *priv) 7206 { 7207 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7208 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7209 int ret, cur = 0; 7210 bool ignore; 7211 7212 now = dev; 7213 iter = &dev->adj_list.lower; 7214 7215 while (1) { 7216 if (now != dev) { 7217 ret = fn(now, priv); 7218 if (ret) 7219 return ret; 7220 } 7221 7222 next = NULL; 7223 while (1) { 7224 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7225 if (!ldev) 7226 break; 7227 if (ignore) 7228 continue; 7229 7230 next = ldev; 7231 niter = &ldev->adj_list.lower; 7232 dev_stack[cur] = now; 7233 iter_stack[cur++] = iter; 7234 break; 7235 } 7236 7237 if (!next) { 7238 if (!cur) 7239 return 0; 7240 next = dev_stack[--cur]; 7241 niter = iter_stack[cur]; 7242 } 7243 7244 now = next; 7245 iter = niter; 7246 } 7247 7248 return 0; 7249 } 7250 7251 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7252 struct list_head **iter) 7253 { 7254 struct netdev_adjacent *lower; 7255 7256 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7257 if (&lower->list == &dev->adj_list.lower) 7258 return NULL; 7259 7260 *iter = &lower->list; 7261 7262 
return lower->dev; 7263 } 7264 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7265 7266 static u8 __netdev_upper_depth(struct net_device *dev) 7267 { 7268 struct net_device *udev; 7269 struct list_head *iter; 7270 u8 max_depth = 0; 7271 bool ignore; 7272 7273 for (iter = &dev->adj_list.upper, 7274 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7275 udev; 7276 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7277 if (ignore) 7278 continue; 7279 if (max_depth < udev->upper_level) 7280 max_depth = udev->upper_level; 7281 } 7282 7283 return max_depth; 7284 } 7285 7286 static u8 __netdev_lower_depth(struct net_device *dev) 7287 { 7288 struct net_device *ldev; 7289 struct list_head *iter; 7290 u8 max_depth = 0; 7291 bool ignore; 7292 7293 for (iter = &dev->adj_list.lower, 7294 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7295 ldev; 7296 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7297 if (ignore) 7298 continue; 7299 if (max_depth < ldev->lower_level) 7300 max_depth = ldev->lower_level; 7301 } 7302 7303 return max_depth; 7304 } 7305 7306 static int __netdev_update_upper_level(struct net_device *dev, 7307 struct netdev_nested_priv *__unused) 7308 { 7309 dev->upper_level = __netdev_upper_depth(dev) + 1; 7310 return 0; 7311 } 7312 7313 #ifdef CONFIG_LOCKDEP 7314 static LIST_HEAD(net_unlink_list); 7315 7316 static void net_unlink_todo(struct net_device *dev) 7317 { 7318 if (list_empty(&dev->unlink_list)) 7319 list_add_tail(&dev->unlink_list, &net_unlink_list); 7320 } 7321 #endif 7322 7323 static int __netdev_update_lower_level(struct net_device *dev, 7324 struct netdev_nested_priv *priv) 7325 { 7326 dev->lower_level = __netdev_lower_depth(dev) + 1; 7327 7328 #ifdef CONFIG_LOCKDEP 7329 if (!priv) 7330 return 0; 7331 7332 if (priv->flags & NESTED_SYNC_IMM) 7333 dev->nested_level = dev->lower_level - 1; 7334 if (priv->flags & NESTED_SYNC_TODO) 7335 net_unlink_todo(dev); 7336 #endif 7337 return 0; 7338 } 7339 7340 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7341 int (*fn)(struct net_device *dev, 7342 struct netdev_nested_priv *priv), 7343 struct netdev_nested_priv *priv) 7344 { 7345 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7346 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7347 int ret, cur = 0; 7348 7349 now = dev; 7350 iter = &dev->adj_list.lower; 7351 7352 while (1) { 7353 if (now != dev) { 7354 ret = fn(now, priv); 7355 if (ret) 7356 return ret; 7357 } 7358 7359 next = NULL; 7360 while (1) { 7361 ldev = netdev_next_lower_dev_rcu(now, &iter); 7362 if (!ldev) 7363 break; 7364 7365 next = ldev; 7366 niter = &ldev->adj_list.lower; 7367 dev_stack[cur] = now; 7368 iter_stack[cur++] = iter; 7369 break; 7370 } 7371 7372 if (!next) { 7373 if (!cur) 7374 return 0; 7375 next = dev_stack[--cur]; 7376 niter = iter_stack[cur]; 7377 } 7378 7379 now = next; 7380 iter = niter; 7381 } 7382 7383 return 0; 7384 } 7385 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7386 7387 /** 7388 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7389 * lower neighbour list, RCU 7390 * variant 7391 * @dev: device 7392 * 7393 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7394 * list. The caller must hold RCU read lock. 
7395 */ 7396 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7397 { 7398 struct netdev_adjacent *lower; 7399 7400 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7401 struct netdev_adjacent, list); 7402 if (lower) 7403 return lower->private; 7404 return NULL; 7405 } 7406 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7407 7408 /** 7409 * netdev_master_upper_dev_get_rcu - Get master upper device 7410 * @dev: device 7411 * 7412 * Find a master upper device and return pointer to it or NULL in case 7413 * it's not there. The caller must hold the RCU read lock. 7414 */ 7415 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7416 { 7417 struct netdev_adjacent *upper; 7418 7419 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7420 struct netdev_adjacent, list); 7421 if (upper && likely(upper->master)) 7422 return upper->dev; 7423 return NULL; 7424 } 7425 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7426 7427 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7428 struct net_device *adj_dev, 7429 struct list_head *dev_list) 7430 { 7431 char linkname[IFNAMSIZ+7]; 7432 7433 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7434 "upper_%s" : "lower_%s", adj_dev->name); 7435 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7436 linkname); 7437 } 7438 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7439 char *name, 7440 struct list_head *dev_list) 7441 { 7442 char linkname[IFNAMSIZ+7]; 7443 7444 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7445 "upper_%s" : "lower_%s", name); 7446 sysfs_remove_link(&(dev->dev.kobj), linkname); 7447 } 7448 7449 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7450 struct net_device *adj_dev, 7451 struct list_head *dev_list) 7452 { 7453 return (dev_list == &dev->adj_list.upper || 7454 dev_list == &dev->adj_list.lower) && 7455 net_eq(dev_net(dev), dev_net(adj_dev)); 7456 } 7457 7458 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7459 struct net_device *adj_dev, 7460 struct list_head *dev_list, 7461 void *private, bool master) 7462 { 7463 struct netdev_adjacent *adj; 7464 int ret; 7465 7466 adj = __netdev_find_adj(adj_dev, dev_list); 7467 7468 if (adj) { 7469 adj->ref_nr += 1; 7470 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7471 dev->name, adj_dev->name, adj->ref_nr); 7472 7473 return 0; 7474 } 7475 7476 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7477 if (!adj) 7478 return -ENOMEM; 7479 7480 adj->dev = adj_dev; 7481 adj->master = master; 7482 adj->ref_nr = 1; 7483 adj->private = private; 7484 adj->ignore = false; 7485 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL); 7486 7487 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7488 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7489 7490 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7491 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7492 if (ret) 7493 goto free_adj; 7494 } 7495 7496 /* Ensure that master link is always the first item in list. 
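 * netdev_master_upper_dev_get() depends on this ordering: it only inspects
 * list_first_entry() of adj_list.upper and checks its ->master flag.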
*/ 7497 if (master) { 7498 ret = sysfs_create_link(&(dev->dev.kobj), 7499 &(adj_dev->dev.kobj), "master"); 7500 if (ret) 7501 goto remove_symlinks; 7502 7503 list_add_rcu(&adj->list, dev_list); 7504 } else { 7505 list_add_tail_rcu(&adj->list, dev_list); 7506 } 7507 7508 return 0; 7509 7510 remove_symlinks: 7511 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7512 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7513 free_adj: 7514 netdev_put(adj_dev, &adj->dev_tracker); 7515 kfree(adj); 7516 7517 return ret; 7518 } 7519 7520 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7521 struct net_device *adj_dev, 7522 u16 ref_nr, 7523 struct list_head *dev_list) 7524 { 7525 struct netdev_adjacent *adj; 7526 7527 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7528 dev->name, adj_dev->name, ref_nr); 7529 7530 adj = __netdev_find_adj(adj_dev, dev_list); 7531 7532 if (!adj) { 7533 pr_err("Adjacency does not exist for device %s from %s\n", 7534 dev->name, adj_dev->name); 7535 WARN_ON(1); 7536 return; 7537 } 7538 7539 if (adj->ref_nr > ref_nr) { 7540 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7541 dev->name, adj_dev->name, ref_nr, 7542 adj->ref_nr - ref_nr); 7543 adj->ref_nr -= ref_nr; 7544 return; 7545 } 7546 7547 if (adj->master) 7548 sysfs_remove_link(&(dev->dev.kobj), "master"); 7549 7550 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7551 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7552 7553 list_del_rcu(&adj->list); 7554 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7555 adj_dev->name, dev->name, adj_dev->name); 7556 netdev_put(adj_dev, &adj->dev_tracker); 7557 kfree_rcu(adj, rcu); 7558 } 7559 7560 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7561 struct net_device *upper_dev, 7562 struct list_head *up_list, 7563 struct list_head *down_list, 7564 void *private, bool master) 7565 { 7566 int ret; 7567 7568 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7569 private, master); 7570 if (ret) 7571 return ret; 7572 7573 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7574 private, false); 7575 if (ret) { 7576 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7577 return ret; 7578 } 7579 7580 return 0; 7581 } 7582 7583 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7584 struct net_device *upper_dev, 7585 u16 ref_nr, 7586 struct list_head *up_list, 7587 struct list_head *down_list) 7588 { 7589 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7590 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7591 } 7592 7593 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7594 struct net_device *upper_dev, 7595 void *private, bool master) 7596 { 7597 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7598 &dev->adj_list.upper, 7599 &upper_dev->adj_list.lower, 7600 private, master); 7601 } 7602 7603 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7604 struct net_device *upper_dev) 7605 { 7606 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7607 &dev->adj_list.upper, 7608 &upper_dev->adj_list.lower); 7609 } 7610 7611 static int __netdev_upper_dev_link(struct net_device *dev, 7612 struct net_device *upper_dev, bool master, 7613 void *upper_priv, void *upper_info, 7614 struct netdev_nested_priv *priv, 7615 struct netlink_ext_ack *extack) 7616 { 7617 struct netdev_notifier_changeupper_info changeupper_info = { 7618 .info = { 7619 .dev = dev, 
7620 .extack = extack, 7621 }, 7622 .upper_dev = upper_dev, 7623 .master = master, 7624 .linking = true, 7625 .upper_info = upper_info, 7626 }; 7627 struct net_device *master_dev; 7628 int ret = 0; 7629 7630 ASSERT_RTNL(); 7631 7632 if (dev == upper_dev) 7633 return -EBUSY; 7634 7635 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7636 if (__netdev_has_upper_dev(upper_dev, dev)) 7637 return -EBUSY; 7638 7639 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7640 return -EMLINK; 7641 7642 if (!master) { 7643 if (__netdev_has_upper_dev(dev, upper_dev)) 7644 return -EEXIST; 7645 } else { 7646 master_dev = __netdev_master_upper_dev_get(dev); 7647 if (master_dev) 7648 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7649 } 7650 7651 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7652 &changeupper_info.info); 7653 ret = notifier_to_errno(ret); 7654 if (ret) 7655 return ret; 7656 7657 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7658 master); 7659 if (ret) 7660 return ret; 7661 7662 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7663 &changeupper_info.info); 7664 ret = notifier_to_errno(ret); 7665 if (ret) 7666 goto rollback; 7667 7668 __netdev_update_upper_level(dev, NULL); 7669 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7670 7671 __netdev_update_lower_level(upper_dev, priv); 7672 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7673 priv); 7674 7675 return 0; 7676 7677 rollback: 7678 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7679 7680 return ret; 7681 } 7682 7683 /** 7684 * netdev_upper_dev_link - Add a link to the upper device 7685 * @dev: device 7686 * @upper_dev: new upper device 7687 * @extack: netlink extended ack 7688 * 7689 * Adds a link to device which is upper to this one. The caller must hold 7690 * the RTNL lock. On a failure a negative errno code is returned. 7691 * On success the reference counts are adjusted and the function 7692 * returns zero. 7693 */ 7694 int netdev_upper_dev_link(struct net_device *dev, 7695 struct net_device *upper_dev, 7696 struct netlink_ext_ack *extack) 7697 { 7698 struct netdev_nested_priv priv = { 7699 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7700 .data = NULL, 7701 }; 7702 7703 return __netdev_upper_dev_link(dev, upper_dev, false, 7704 NULL, NULL, &priv, extack); 7705 } 7706 EXPORT_SYMBOL(netdev_upper_dev_link); 7707 7708 /** 7709 * netdev_master_upper_dev_link - Add a master link to the upper device 7710 * @dev: device 7711 * @upper_dev: new upper device 7712 * @upper_priv: upper device private 7713 * @upper_info: upper info to be passed down via notifier 7714 * @extack: netlink extended ack 7715 * 7716 * Adds a link to device which is upper to this one. In this case, only 7717 * one master upper device can be linked, although other non-master devices 7718 * might be linked as well. The caller must hold the RTNL lock. 7719 * On a failure a negative errno code is returned. On success the reference 7720 * counts are adjusted and the function returns zero. 
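 *
 * A typical caller is a master driver enslaving a port device (sketch only,
 * hypothetical team_dev / port_dev / port_priv names):
 *
 *	err = netdev_master_upper_dev_link(port_dev, team_dev,
 *					   port_priv, NULL, extack);
 *	if (err)
 *		goto unwind;
 *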
7721 */ 7722 int netdev_master_upper_dev_link(struct net_device *dev, 7723 struct net_device *upper_dev, 7724 void *upper_priv, void *upper_info, 7725 struct netlink_ext_ack *extack) 7726 { 7727 struct netdev_nested_priv priv = { 7728 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7729 .data = NULL, 7730 }; 7731 7732 return __netdev_upper_dev_link(dev, upper_dev, true, 7733 upper_priv, upper_info, &priv, extack); 7734 } 7735 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7736 7737 static void __netdev_upper_dev_unlink(struct net_device *dev, 7738 struct net_device *upper_dev, 7739 struct netdev_nested_priv *priv) 7740 { 7741 struct netdev_notifier_changeupper_info changeupper_info = { 7742 .info = { 7743 .dev = dev, 7744 }, 7745 .upper_dev = upper_dev, 7746 .linking = false, 7747 }; 7748 7749 ASSERT_RTNL(); 7750 7751 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7752 7753 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7754 &changeupper_info.info); 7755 7756 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7757 7758 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7759 &changeupper_info.info); 7760 7761 __netdev_update_upper_level(dev, NULL); 7762 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7763 7764 __netdev_update_lower_level(upper_dev, priv); 7765 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7766 priv); 7767 } 7768 7769 /** 7770 * netdev_upper_dev_unlink - Removes a link to upper device 7771 * @dev: device 7772 * @upper_dev: new upper device 7773 * 7774 * Removes a link to device which is upper to this one. The caller must hold 7775 * the RTNL lock. 7776 */ 7777 void netdev_upper_dev_unlink(struct net_device *dev, 7778 struct net_device *upper_dev) 7779 { 7780 struct netdev_nested_priv priv = { 7781 .flags = NESTED_SYNC_TODO, 7782 .data = NULL, 7783 }; 7784 7785 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 7786 } 7787 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7788 7789 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 7790 struct net_device *lower_dev, 7791 bool val) 7792 { 7793 struct netdev_adjacent *adj; 7794 7795 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 7796 if (adj) 7797 adj->ignore = val; 7798 7799 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 7800 if (adj) 7801 adj->ignore = val; 7802 } 7803 7804 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 7805 struct net_device *lower_dev) 7806 { 7807 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 7808 } 7809 7810 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 7811 struct net_device *lower_dev) 7812 { 7813 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 7814 } 7815 7816 int netdev_adjacent_change_prepare(struct net_device *old_dev, 7817 struct net_device *new_dev, 7818 struct net_device *dev, 7819 struct netlink_ext_ack *extack) 7820 { 7821 struct netdev_nested_priv priv = { 7822 .flags = 0, 7823 .data = NULL, 7824 }; 7825 int err; 7826 7827 if (!new_dev) 7828 return 0; 7829 7830 if (old_dev && new_dev != old_dev) 7831 netdev_adjacent_dev_disable(dev, old_dev); 7832 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 7833 extack); 7834 if (err) { 7835 if (old_dev && new_dev != old_dev) 7836 netdev_adjacent_dev_enable(dev, old_dev); 7837 return err; 7838 } 7839 7840 return 0; 7841 } 7842 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 7843 7844 void netdev_adjacent_change_commit(struct net_device *old_dev, 7845 struct 
net_device *new_dev, 7846 struct net_device *dev) 7847 { 7848 struct netdev_nested_priv priv = { 7849 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7850 .data = NULL, 7851 }; 7852 7853 if (!new_dev || !old_dev) 7854 return; 7855 7856 if (new_dev == old_dev) 7857 return; 7858 7859 netdev_adjacent_dev_enable(dev, old_dev); 7860 __netdev_upper_dev_unlink(old_dev, dev, &priv); 7861 } 7862 EXPORT_SYMBOL(netdev_adjacent_change_commit); 7863 7864 void netdev_adjacent_change_abort(struct net_device *old_dev, 7865 struct net_device *new_dev, 7866 struct net_device *dev) 7867 { 7868 struct netdev_nested_priv priv = { 7869 .flags = 0, 7870 .data = NULL, 7871 }; 7872 7873 if (!new_dev) 7874 return; 7875 7876 if (old_dev && new_dev != old_dev) 7877 netdev_adjacent_dev_enable(dev, old_dev); 7878 7879 __netdev_upper_dev_unlink(new_dev, dev, &priv); 7880 } 7881 EXPORT_SYMBOL(netdev_adjacent_change_abort); 7882 7883 /** 7884 * netdev_bonding_info_change - Dispatch event about slave change 7885 * @dev: device 7886 * @bonding_info: info to dispatch 7887 * 7888 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7889 * The caller must hold the RTNL lock. 7890 */ 7891 void netdev_bonding_info_change(struct net_device *dev, 7892 struct netdev_bonding_info *bonding_info) 7893 { 7894 struct netdev_notifier_bonding_info info = { 7895 .info.dev = dev, 7896 }; 7897 7898 memcpy(&info.bonding_info, bonding_info, 7899 sizeof(struct netdev_bonding_info)); 7900 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7901 &info.info); 7902 } 7903 EXPORT_SYMBOL(netdev_bonding_info_change); 7904 7905 static int netdev_offload_xstats_enable_l3(struct net_device *dev, 7906 struct netlink_ext_ack *extack) 7907 { 7908 struct netdev_notifier_offload_xstats_info info = { 7909 .info.dev = dev, 7910 .info.extack = extack, 7911 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7912 }; 7913 int err; 7914 int rc; 7915 7916 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), 7917 GFP_KERNEL); 7918 if (!dev->offload_xstats_l3) 7919 return -ENOMEM; 7920 7921 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, 7922 NETDEV_OFFLOAD_XSTATS_DISABLE, 7923 &info.info); 7924 err = notifier_to_errno(rc); 7925 if (err) 7926 goto free_stats; 7927 7928 return 0; 7929 7930 free_stats: 7931 kfree(dev->offload_xstats_l3); 7932 dev->offload_xstats_l3 = NULL; 7933 return err; 7934 } 7935 7936 int netdev_offload_xstats_enable(struct net_device *dev, 7937 enum netdev_offload_xstats_type type, 7938 struct netlink_ext_ack *extack) 7939 { 7940 ASSERT_RTNL(); 7941 7942 if (netdev_offload_xstats_enabled(dev, type)) 7943 return -EALREADY; 7944 7945 switch (type) { 7946 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7947 return netdev_offload_xstats_enable_l3(dev, extack); 7948 } 7949 7950 WARN_ON(1); 7951 return -EINVAL; 7952 } 7953 EXPORT_SYMBOL(netdev_offload_xstats_enable); 7954 7955 static void netdev_offload_xstats_disable_l3(struct net_device *dev) 7956 { 7957 struct netdev_notifier_offload_xstats_info info = { 7958 .info.dev = dev, 7959 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7960 }; 7961 7962 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, 7963 &info.info); 7964 kfree(dev->offload_xstats_l3); 7965 dev->offload_xstats_l3 = NULL; 7966 } 7967 7968 int netdev_offload_xstats_disable(struct net_device *dev, 7969 enum netdev_offload_xstats_type type) 7970 { 7971 ASSERT_RTNL(); 7972 7973 if (!netdev_offload_xstats_enabled(dev, type)) 7974 return -EALREADY; 7975 7976 switch (type) { 7977 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7978 
netdev_offload_xstats_disable_l3(dev); 7979 return 0; 7980 } 7981 7982 WARN_ON(1); 7983 return -EINVAL; 7984 } 7985 EXPORT_SYMBOL(netdev_offload_xstats_disable); 7986 7987 static void netdev_offload_xstats_disable_all(struct net_device *dev) 7988 { 7989 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); 7990 } 7991 7992 static struct rtnl_hw_stats64 * 7993 netdev_offload_xstats_get_ptr(const struct net_device *dev, 7994 enum netdev_offload_xstats_type type) 7995 { 7996 switch (type) { 7997 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7998 return dev->offload_xstats_l3; 7999 } 8000 8001 WARN_ON(1); 8002 return NULL; 8003 } 8004 8005 bool netdev_offload_xstats_enabled(const struct net_device *dev, 8006 enum netdev_offload_xstats_type type) 8007 { 8008 ASSERT_RTNL(); 8009 8010 return netdev_offload_xstats_get_ptr(dev, type); 8011 } 8012 EXPORT_SYMBOL(netdev_offload_xstats_enabled); 8013 8014 struct netdev_notifier_offload_xstats_ru { 8015 bool used; 8016 }; 8017 8018 struct netdev_notifier_offload_xstats_rd { 8019 struct rtnl_hw_stats64 stats; 8020 bool used; 8021 }; 8022 8023 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, 8024 const struct rtnl_hw_stats64 *src) 8025 { 8026 dest->rx_packets += src->rx_packets; 8027 dest->tx_packets += src->tx_packets; 8028 dest->rx_bytes += src->rx_bytes; 8029 dest->tx_bytes += src->tx_bytes; 8030 dest->rx_errors += src->rx_errors; 8031 dest->tx_errors += src->tx_errors; 8032 dest->rx_dropped += src->rx_dropped; 8033 dest->tx_dropped += src->tx_dropped; 8034 dest->multicast += src->multicast; 8035 } 8036 8037 static int netdev_offload_xstats_get_used(struct net_device *dev, 8038 enum netdev_offload_xstats_type type, 8039 bool *p_used, 8040 struct netlink_ext_ack *extack) 8041 { 8042 struct netdev_notifier_offload_xstats_ru report_used = {}; 8043 struct netdev_notifier_offload_xstats_info info = { 8044 .info.dev = dev, 8045 .info.extack = extack, 8046 .type = type, 8047 .report_used = &report_used, 8048 }; 8049 int rc; 8050 8051 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); 8052 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, 8053 &info.info); 8054 *p_used = report_used.used; 8055 return notifier_to_errno(rc); 8056 } 8057 8058 static int netdev_offload_xstats_get_stats(struct net_device *dev, 8059 enum netdev_offload_xstats_type type, 8060 struct rtnl_hw_stats64 *p_stats, 8061 bool *p_used, 8062 struct netlink_ext_ack *extack) 8063 { 8064 struct netdev_notifier_offload_xstats_rd report_delta = {}; 8065 struct netdev_notifier_offload_xstats_info info = { 8066 .info.dev = dev, 8067 .info.extack = extack, 8068 .type = type, 8069 .report_delta = &report_delta, 8070 }; 8071 struct rtnl_hw_stats64 *stats; 8072 int rc; 8073 8074 stats = netdev_offload_xstats_get_ptr(dev, type); 8075 if (WARN_ON(!stats)) 8076 return -EINVAL; 8077 8078 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 8079 &info.info); 8080 8081 /* Cache whatever we got, even if there was an error, otherwise the 8082 * successful stats retrievals would get lost. 
8083 */ 8084 netdev_hw_stats64_add(stats, &report_delta.stats); 8085 8086 if (p_stats) 8087 *p_stats = *stats; 8088 *p_used = report_delta.used; 8089 8090 return notifier_to_errno(rc); 8091 } 8092 8093 int netdev_offload_xstats_get(struct net_device *dev, 8094 enum netdev_offload_xstats_type type, 8095 struct rtnl_hw_stats64 *p_stats, bool *p_used, 8096 struct netlink_ext_ack *extack) 8097 { 8098 ASSERT_RTNL(); 8099 8100 if (p_stats) 8101 return netdev_offload_xstats_get_stats(dev, type, p_stats, 8102 p_used, extack); 8103 else 8104 return netdev_offload_xstats_get_used(dev, type, p_used, 8105 extack); 8106 } 8107 EXPORT_SYMBOL(netdev_offload_xstats_get); 8108 8109 void 8110 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, 8111 const struct rtnl_hw_stats64 *stats) 8112 { 8113 report_delta->used = true; 8114 netdev_hw_stats64_add(&report_delta->stats, stats); 8115 } 8116 EXPORT_SYMBOL(netdev_offload_xstats_report_delta); 8117 8118 void 8119 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) 8120 { 8121 report_used->used = true; 8122 } 8123 EXPORT_SYMBOL(netdev_offload_xstats_report_used); 8124 8125 void netdev_offload_xstats_push_delta(struct net_device *dev, 8126 enum netdev_offload_xstats_type type, 8127 const struct rtnl_hw_stats64 *p_stats) 8128 { 8129 struct rtnl_hw_stats64 *stats; 8130 8131 ASSERT_RTNL(); 8132 8133 stats = netdev_offload_xstats_get_ptr(dev, type); 8134 if (WARN_ON(!stats)) 8135 return; 8136 8137 netdev_hw_stats64_add(stats, p_stats); 8138 } 8139 EXPORT_SYMBOL(netdev_offload_xstats_push_delta); 8140 8141 /** 8142 * netdev_get_xmit_slave - Get the xmit slave of master device 8143 * @dev: device 8144 * @skb: The packet 8145 * @all_slaves: assume all the slaves are active 8146 * 8147 * The reference counters are not incremented so the caller must be 8148 * careful with locks. The caller must hold RCU lock. 8149 * %NULL is returned if no slave is found. 8150 */ 8151 8152 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8153 struct sk_buff *skb, 8154 bool all_slaves) 8155 { 8156 const struct net_device_ops *ops = dev->netdev_ops; 8157 8158 if (!ops->ndo_get_xmit_slave) 8159 return NULL; 8160 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8161 } 8162 EXPORT_SYMBOL(netdev_get_xmit_slave); 8163 8164 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8165 struct sock *sk) 8166 { 8167 const struct net_device_ops *ops = dev->netdev_ops; 8168 8169 if (!ops->ndo_sk_get_lower_dev) 8170 return NULL; 8171 return ops->ndo_sk_get_lower_dev(dev, sk); 8172 } 8173 8174 /** 8175 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8176 * @dev: device 8177 * @sk: the socket 8178 * 8179 * %NULL is returned if no lower device is found. 
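 *
 * Illustrative sketch, not taken from this file: a stacked setup such as
 * TLS device offload is assumed to use this to find the physical device
 * underneath e.g. a bonding master for a given socket. program_offload()
 * below is a hypothetical helper:
 *
 *	struct net_device *lowest;
 *
 *	lowest = netdev_sk_get_lowest_dev(bond_dev, sk);
 *	if (lowest)
 *		program_offload(lowest, sk);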
8180 */ 8181 8182 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8183 struct sock *sk) 8184 { 8185 struct net_device *lower; 8186 8187 lower = netdev_sk_get_lower_dev(dev, sk); 8188 while (lower) { 8189 dev = lower; 8190 lower = netdev_sk_get_lower_dev(dev, sk); 8191 } 8192 8193 return dev; 8194 } 8195 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8196 8197 static void netdev_adjacent_add_links(struct net_device *dev) 8198 { 8199 struct netdev_adjacent *iter; 8200 8201 struct net *net = dev_net(dev); 8202 8203 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8204 if (!net_eq(net, dev_net(iter->dev))) 8205 continue; 8206 netdev_adjacent_sysfs_add(iter->dev, dev, 8207 &iter->dev->adj_list.lower); 8208 netdev_adjacent_sysfs_add(dev, iter->dev, 8209 &dev->adj_list.upper); 8210 } 8211 8212 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8213 if (!net_eq(net, dev_net(iter->dev))) 8214 continue; 8215 netdev_adjacent_sysfs_add(iter->dev, dev, 8216 &iter->dev->adj_list.upper); 8217 netdev_adjacent_sysfs_add(dev, iter->dev, 8218 &dev->adj_list.lower); 8219 } 8220 } 8221 8222 static void netdev_adjacent_del_links(struct net_device *dev) 8223 { 8224 struct netdev_adjacent *iter; 8225 8226 struct net *net = dev_net(dev); 8227 8228 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8229 if (!net_eq(net, dev_net(iter->dev))) 8230 continue; 8231 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8232 &iter->dev->adj_list.lower); 8233 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8234 &dev->adj_list.upper); 8235 } 8236 8237 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8238 if (!net_eq(net, dev_net(iter->dev))) 8239 continue; 8240 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8241 &iter->dev->adj_list.upper); 8242 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8243 &dev->adj_list.lower); 8244 } 8245 } 8246 8247 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8248 { 8249 struct netdev_adjacent *iter; 8250 8251 struct net *net = dev_net(dev); 8252 8253 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8254 if (!net_eq(net, dev_net(iter->dev))) 8255 continue; 8256 netdev_adjacent_sysfs_del(iter->dev, oldname, 8257 &iter->dev->adj_list.lower); 8258 netdev_adjacent_sysfs_add(iter->dev, dev, 8259 &iter->dev->adj_list.lower); 8260 } 8261 8262 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8263 if (!net_eq(net, dev_net(iter->dev))) 8264 continue; 8265 netdev_adjacent_sysfs_del(iter->dev, oldname, 8266 &iter->dev->adj_list.upper); 8267 netdev_adjacent_sysfs_add(iter->dev, dev, 8268 &iter->dev->adj_list.upper); 8269 } 8270 } 8271 8272 void *netdev_lower_dev_get_private(struct net_device *dev, 8273 struct net_device *lower_dev) 8274 { 8275 struct netdev_adjacent *lower; 8276 8277 if (!lower_dev) 8278 return NULL; 8279 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8280 if (!lower) 8281 return NULL; 8282 8283 return lower->private; 8284 } 8285 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8286 8287 8288 /** 8289 * netdev_lower_state_changed - Dispatch event about lower device state change 8290 * @lower_dev: device 8291 * @lower_state_info: state to dispatch 8292 * 8293 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8294 * The caller must hold the RTNL lock. 
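 *
 * Illustrative sketch, not taken from this file: a LAG master such as
 * bonding or team is assumed to report per-slave state roughly like this,
 * with RTNL held (up and tx_ok are hypothetical booleans):
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up    = up,
 *		.tx_enabled = tx_ok,
 *	};
 *
 *	netdev_lower_state_changed(slave_dev, &info);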
8295 */ 8296 void netdev_lower_state_changed(struct net_device *lower_dev, 8297 void *lower_state_info) 8298 { 8299 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8300 .info.dev = lower_dev, 8301 }; 8302 8303 ASSERT_RTNL(); 8304 changelowerstate_info.lower_state_info = lower_state_info; 8305 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8306 &changelowerstate_info.info); 8307 } 8308 EXPORT_SYMBOL(netdev_lower_state_changed); 8309 8310 static void dev_change_rx_flags(struct net_device *dev, int flags) 8311 { 8312 const struct net_device_ops *ops = dev->netdev_ops; 8313 8314 if (ops->ndo_change_rx_flags) 8315 ops->ndo_change_rx_flags(dev, flags); 8316 } 8317 8318 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8319 { 8320 unsigned int old_flags = dev->flags; 8321 kuid_t uid; 8322 kgid_t gid; 8323 8324 ASSERT_RTNL(); 8325 8326 dev->flags |= IFF_PROMISC; 8327 dev->promiscuity += inc; 8328 if (dev->promiscuity == 0) { 8329 /* 8330 * Avoid overflow. 8331 * If inc causes overflow, untouch promisc and return error. 8332 */ 8333 if (inc < 0) 8334 dev->flags &= ~IFF_PROMISC; 8335 else { 8336 dev->promiscuity -= inc; 8337 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 8338 return -EOVERFLOW; 8339 } 8340 } 8341 if (dev->flags != old_flags) { 8342 netdev_info(dev, "%s promiscuous mode\n", 8343 dev->flags & IFF_PROMISC ? "entered" : "left"); 8344 if (audit_enabled) { 8345 current_uid_gid(&uid, &gid); 8346 audit_log(audit_context(), GFP_ATOMIC, 8347 AUDIT_ANOM_PROMISCUOUS, 8348 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8349 dev->name, (dev->flags & IFF_PROMISC), 8350 (old_flags & IFF_PROMISC), 8351 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8352 from_kuid(&init_user_ns, uid), 8353 from_kgid(&init_user_ns, gid), 8354 audit_get_sessionid(current)); 8355 } 8356 8357 dev_change_rx_flags(dev, IFF_PROMISC); 8358 } 8359 if (notify) 8360 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); 8361 return 0; 8362 } 8363 8364 /** 8365 * dev_set_promiscuity - update promiscuity count on a device 8366 * @dev: device 8367 * @inc: modifier 8368 * 8369 * Add or remove promiscuity from a device. While the count in the device 8370 * remains above zero the interface remains promiscuous. Once it hits zero 8371 * the device reverts back to normal filtering operation. A negative inc 8372 * value is used to drop promiscuity on the device. 8373 * Return 0 if successful or a negative errno code on error. 8374 */ 8375 int dev_set_promiscuity(struct net_device *dev, int inc) 8376 { 8377 unsigned int old_flags = dev->flags; 8378 int err; 8379 8380 err = __dev_set_promiscuity(dev, inc, true); 8381 if (err < 0) 8382 return err; 8383 if (dev->flags != old_flags) 8384 dev_set_rx_mode(dev); 8385 return err; 8386 } 8387 EXPORT_SYMBOL(dev_set_promiscuity); 8388 8389 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8390 { 8391 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8392 8393 ASSERT_RTNL(); 8394 8395 dev->flags |= IFF_ALLMULTI; 8396 dev->allmulti += inc; 8397 if (dev->allmulti == 0) { 8398 /* 8399 * Avoid overflow. 8400 * If inc causes overflow, untouch allmulti and return error. 8401 */ 8402 if (inc < 0) 8403 dev->flags &= ~IFF_ALLMULTI; 8404 else { 8405 dev->allmulti -= inc; 8406 netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); 8407 return -EOVERFLOW; 8408 } 8409 } 8410 if (dev->flags ^ old_flags) { 8411 netdev_info(dev, "%s allmulticast mode\n", 8412 dev->flags & IFF_ALLMULTI ? "entered" : "left"); 8413 dev_change_rx_flags(dev, IFF_ALLMULTI); 8414 dev_set_rx_mode(dev); 8415 if (notify) 8416 __dev_notify_flags(dev, old_flags, 8417 dev->gflags ^ old_gflags, 0, NULL); 8418 } 8419 return 0; 8420 } 8421 8422 /** 8423 * dev_set_allmulti - update allmulti count on a device 8424 * @dev: device 8425 * @inc: modifier 8426 * 8427 * Add or remove reception of all multicast frames to a device. While the 8428 * count in the device remains above zero the interface remains listening 8429 * to all interfaces. Once it hits zero the device reverts back to normal 8430 * filtering operation. A negative @inc value is used to drop the counter 8431 * when releasing a resource needing all multicasts. 8432 * Return 0 if successful or a negative errno code on error. 8433 */ 8434 8435 int dev_set_allmulti(struct net_device *dev, int inc) 8436 { 8437 return __dev_set_allmulti(dev, inc, true); 8438 } 8439 EXPORT_SYMBOL(dev_set_allmulti); 8440 8441 /* 8442 * Upload unicast and multicast address lists to device and 8443 * configure RX filtering. When the device doesn't support unicast 8444 * filtering it is put in promiscuous mode while unicast addresses 8445 * are present. 8446 */ 8447 void __dev_set_rx_mode(struct net_device *dev) 8448 { 8449 const struct net_device_ops *ops = dev->netdev_ops; 8450 8451 /* dev_open will call this function so the list will stay sane. */ 8452 if (!(dev->flags&IFF_UP)) 8453 return; 8454 8455 if (!netif_device_present(dev)) 8456 return; 8457 8458 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8459 /* Unicast addresses changes may only happen under the rtnl, 8460 * therefore calling __dev_set_promiscuity here is safe. 8461 */ 8462 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8463 __dev_set_promiscuity(dev, 1, false); 8464 dev->uc_promisc = true; 8465 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8466 __dev_set_promiscuity(dev, -1, false); 8467 dev->uc_promisc = false; 8468 } 8469 } 8470 8471 if (ops->ndo_set_rx_mode) 8472 ops->ndo_set_rx_mode(dev); 8473 } 8474 8475 void dev_set_rx_mode(struct net_device *dev) 8476 { 8477 netif_addr_lock_bh(dev); 8478 __dev_set_rx_mode(dev); 8479 netif_addr_unlock_bh(dev); 8480 } 8481 8482 /** 8483 * dev_get_flags - get flags reported to userspace 8484 * @dev: device 8485 * 8486 * Get the combination of flag bits exported through APIs to userspace. 8487 */ 8488 unsigned int dev_get_flags(const struct net_device *dev) 8489 { 8490 unsigned int flags; 8491 8492 flags = (dev->flags & ~(IFF_PROMISC | 8493 IFF_ALLMULTI | 8494 IFF_RUNNING | 8495 IFF_LOWER_UP | 8496 IFF_DORMANT)) | 8497 (dev->gflags & (IFF_PROMISC | 8498 IFF_ALLMULTI)); 8499 8500 if (netif_running(dev)) { 8501 if (netif_oper_up(dev)) 8502 flags |= IFF_RUNNING; 8503 if (netif_carrier_ok(dev)) 8504 flags |= IFF_LOWER_UP; 8505 if (netif_dormant(dev)) 8506 flags |= IFF_DORMANT; 8507 } 8508 8509 return flags; 8510 } 8511 EXPORT_SYMBOL(dev_get_flags); 8512 8513 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8514 struct netlink_ext_ack *extack) 8515 { 8516 unsigned int old_flags = dev->flags; 8517 int ret; 8518 8519 ASSERT_RTNL(); 8520 8521 /* 8522 * Set the flags on our device. 
8523 */ 8524 8525 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8526 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8527 IFF_AUTOMEDIA)) | 8528 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8529 IFF_ALLMULTI)); 8530 8531 /* 8532 * Load in the correct multicast list now the flags have changed. 8533 */ 8534 8535 if ((old_flags ^ flags) & IFF_MULTICAST) 8536 dev_change_rx_flags(dev, IFF_MULTICAST); 8537 8538 dev_set_rx_mode(dev); 8539 8540 /* 8541 * Have we downed the interface. We handle IFF_UP ourselves 8542 * according to user attempts to set it, rather than blindly 8543 * setting it. 8544 */ 8545 8546 ret = 0; 8547 if ((old_flags ^ flags) & IFF_UP) { 8548 if (old_flags & IFF_UP) 8549 __dev_close(dev); 8550 else 8551 ret = __dev_open(dev, extack); 8552 } 8553 8554 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8555 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8556 unsigned int old_flags = dev->flags; 8557 8558 dev->gflags ^= IFF_PROMISC; 8559 8560 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8561 if (dev->flags != old_flags) 8562 dev_set_rx_mode(dev); 8563 } 8564 8565 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8566 * is important. Some (broken) drivers set IFF_PROMISC, when 8567 * IFF_ALLMULTI is requested not asking us and not reporting. 8568 */ 8569 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8570 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8571 8572 dev->gflags ^= IFF_ALLMULTI; 8573 __dev_set_allmulti(dev, inc, false); 8574 } 8575 8576 return ret; 8577 } 8578 8579 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8580 unsigned int gchanges, u32 portid, 8581 const struct nlmsghdr *nlh) 8582 { 8583 unsigned int changes = dev->flags ^ old_flags; 8584 8585 if (gchanges) 8586 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); 8587 8588 if (changes & IFF_UP) { 8589 if (dev->flags & IFF_UP) 8590 call_netdevice_notifiers(NETDEV_UP, dev); 8591 else 8592 call_netdevice_notifiers(NETDEV_DOWN, dev); 8593 } 8594 8595 if (dev->flags & IFF_UP && 8596 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8597 struct netdev_notifier_change_info change_info = { 8598 .info = { 8599 .dev = dev, 8600 }, 8601 .flags_changed = changes, 8602 }; 8603 8604 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8605 } 8606 } 8607 8608 /** 8609 * dev_change_flags - change device settings 8610 * @dev: device 8611 * @flags: device state flags 8612 * @extack: netlink extended ack 8613 * 8614 * Change settings on device based state flags. The flags are 8615 * in the userspace exported format. 
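 *
 * A minimal usage sketch (illustrative only, with RTNL already held by the
 * caller), e.g. to bring an interface administratively up:
 *
 *	err = dev_change_flags(dev, dev->flags | IFF_UP, extack);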
8616 */ 8617 int dev_change_flags(struct net_device *dev, unsigned int flags, 8618 struct netlink_ext_ack *extack) 8619 { 8620 int ret; 8621 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8622 8623 ret = __dev_change_flags(dev, flags, extack); 8624 if (ret < 0) 8625 return ret; 8626 8627 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8628 __dev_notify_flags(dev, old_flags, changes, 0, NULL); 8629 return ret; 8630 } 8631 EXPORT_SYMBOL(dev_change_flags); 8632 8633 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8634 { 8635 const struct net_device_ops *ops = dev->netdev_ops; 8636 8637 if (ops->ndo_change_mtu) 8638 return ops->ndo_change_mtu(dev, new_mtu); 8639 8640 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8641 WRITE_ONCE(dev->mtu, new_mtu); 8642 return 0; 8643 } 8644 EXPORT_SYMBOL(__dev_set_mtu); 8645 8646 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8647 struct netlink_ext_ack *extack) 8648 { 8649 /* MTU must be positive, and in range */ 8650 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8651 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8652 return -EINVAL; 8653 } 8654 8655 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8656 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8657 return -EINVAL; 8658 } 8659 return 0; 8660 } 8661 8662 /** 8663 * dev_set_mtu_ext - Change maximum transfer unit 8664 * @dev: device 8665 * @new_mtu: new transfer unit 8666 * @extack: netlink extended ack 8667 * 8668 * Change the maximum transfer size of the network device. 8669 */ 8670 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8671 struct netlink_ext_ack *extack) 8672 { 8673 int err, orig_mtu; 8674 8675 if (new_mtu == dev->mtu) 8676 return 0; 8677 8678 err = dev_validate_mtu(dev, new_mtu, extack); 8679 if (err) 8680 return err; 8681 8682 if (!netif_device_present(dev)) 8683 return -ENODEV; 8684 8685 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8686 err = notifier_to_errno(err); 8687 if (err) 8688 return err; 8689 8690 orig_mtu = dev->mtu; 8691 err = __dev_set_mtu(dev, new_mtu); 8692 8693 if (!err) { 8694 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8695 orig_mtu); 8696 err = notifier_to_errno(err); 8697 if (err) { 8698 /* setting mtu back and notifying everyone again, 8699 * so that they have a chance to revert changes. 
8700 */ 8701 __dev_set_mtu(dev, orig_mtu); 8702 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8703 new_mtu); 8704 } 8705 } 8706 return err; 8707 } 8708 8709 int dev_set_mtu(struct net_device *dev, int new_mtu) 8710 { 8711 struct netlink_ext_ack extack; 8712 int err; 8713 8714 memset(&extack, 0, sizeof(extack)); 8715 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8716 if (err && extack._msg) 8717 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8718 return err; 8719 } 8720 EXPORT_SYMBOL(dev_set_mtu); 8721 8722 /** 8723 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8724 * @dev: device 8725 * @new_len: new tx queue length 8726 */ 8727 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8728 { 8729 unsigned int orig_len = dev->tx_queue_len; 8730 int res; 8731 8732 if (new_len != (unsigned int)new_len) 8733 return -ERANGE; 8734 8735 if (new_len != orig_len) { 8736 dev->tx_queue_len = new_len; 8737 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8738 res = notifier_to_errno(res); 8739 if (res) 8740 goto err_rollback; 8741 res = dev_qdisc_change_tx_queue_len(dev); 8742 if (res) 8743 goto err_rollback; 8744 } 8745 8746 return 0; 8747 8748 err_rollback: 8749 netdev_err(dev, "refused to change device tx_queue_len\n"); 8750 dev->tx_queue_len = orig_len; 8751 return res; 8752 } 8753 8754 /** 8755 * dev_set_group - Change group this device belongs to 8756 * @dev: device 8757 * @new_group: group this device should belong to 8758 */ 8759 void dev_set_group(struct net_device *dev, int new_group) 8760 { 8761 dev->group = new_group; 8762 } 8763 8764 /** 8765 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8766 * @dev: device 8767 * @addr: new address 8768 * @extack: netlink extended ack 8769 */ 8770 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8771 struct netlink_ext_ack *extack) 8772 { 8773 struct netdev_notifier_pre_changeaddr_info info = { 8774 .info.dev = dev, 8775 .info.extack = extack, 8776 .dev_addr = addr, 8777 }; 8778 int rc; 8779 8780 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8781 return notifier_to_errno(rc); 8782 } 8783 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8784 8785 /** 8786 * dev_set_mac_address - Change Media Access Control Address 8787 * @dev: device 8788 * @sa: new address 8789 * @extack: netlink extended ack 8790 * 8791 * Change the hardware (MAC) address of the device 8792 */ 8793 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8794 struct netlink_ext_ack *extack) 8795 { 8796 const struct net_device_ops *ops = dev->netdev_ops; 8797 int err; 8798 8799 if (!ops->ndo_set_mac_address) 8800 return -EOPNOTSUPP; 8801 if (sa->sa_family != dev->type) 8802 return -EINVAL; 8803 if (!netif_device_present(dev)) 8804 return -ENODEV; 8805 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8806 if (err) 8807 return err; 8808 err = ops->ndo_set_mac_address(dev, sa); 8809 if (err) 8810 return err; 8811 dev->addr_assign_type = NET_ADDR_SET; 8812 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8813 add_device_randomness(dev->dev_addr, dev->addr_len); 8814 return 0; 8815 } 8816 EXPORT_SYMBOL(dev_set_mac_address); 8817 8818 static DECLARE_RWSEM(dev_addr_sem); 8819 8820 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 8821 struct netlink_ext_ack *extack) 8822 { 8823 int ret; 8824 8825 down_write(&dev_addr_sem); 8826 ret = dev_set_mac_address(dev, sa, extack); 8827 up_write(&dev_addr_sem); 8828 return ret; 8829 } 
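/* Illustrative caller sketch, not part of this file: the SIOCSIFHWADDR
 * ioctl path is assumed to funnel into dev_set_mac_address_user() roughly
 * like this, with RTNL held and ifr copied from the user's struct ifreq:
 *
 *	struct sockaddr *sa = &ifr->ifr_hwaddr;
 *
 *	err = dev_set_mac_address_user(dev, sa, NULL);
 */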
8830 EXPORT_SYMBOL(dev_set_mac_address_user); 8831 8832 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 8833 { 8834 size_t size = sizeof(sa->sa_data_min); 8835 struct net_device *dev; 8836 int ret = 0; 8837 8838 down_read(&dev_addr_sem); 8839 rcu_read_lock(); 8840 8841 dev = dev_get_by_name_rcu(net, dev_name); 8842 if (!dev) { 8843 ret = -ENODEV; 8844 goto unlock; 8845 } 8846 if (!dev->addr_len) 8847 memset(sa->sa_data, 0, size); 8848 else 8849 memcpy(sa->sa_data, dev->dev_addr, 8850 min_t(size_t, size, dev->addr_len)); 8851 sa->sa_family = dev->type; 8852 8853 unlock: 8854 rcu_read_unlock(); 8855 up_read(&dev_addr_sem); 8856 return ret; 8857 } 8858 EXPORT_SYMBOL(dev_get_mac_address); 8859 8860 /** 8861 * dev_change_carrier - Change device carrier 8862 * @dev: device 8863 * @new_carrier: new value 8864 * 8865 * Change device carrier 8866 */ 8867 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8868 { 8869 const struct net_device_ops *ops = dev->netdev_ops; 8870 8871 if (!ops->ndo_change_carrier) 8872 return -EOPNOTSUPP; 8873 if (!netif_device_present(dev)) 8874 return -ENODEV; 8875 return ops->ndo_change_carrier(dev, new_carrier); 8876 } 8877 8878 /** 8879 * dev_get_phys_port_id - Get device physical port ID 8880 * @dev: device 8881 * @ppid: port ID 8882 * 8883 * Get device physical port ID 8884 */ 8885 int dev_get_phys_port_id(struct net_device *dev, 8886 struct netdev_phys_item_id *ppid) 8887 { 8888 const struct net_device_ops *ops = dev->netdev_ops; 8889 8890 if (!ops->ndo_get_phys_port_id) 8891 return -EOPNOTSUPP; 8892 return ops->ndo_get_phys_port_id(dev, ppid); 8893 } 8894 8895 /** 8896 * dev_get_phys_port_name - Get device physical port name 8897 * @dev: device 8898 * @name: port name 8899 * @len: limit of bytes to copy to name 8900 * 8901 * Get device physical port name 8902 */ 8903 int dev_get_phys_port_name(struct net_device *dev, 8904 char *name, size_t len) 8905 { 8906 const struct net_device_ops *ops = dev->netdev_ops; 8907 int err; 8908 8909 if (ops->ndo_get_phys_port_name) { 8910 err = ops->ndo_get_phys_port_name(dev, name, len); 8911 if (err != -EOPNOTSUPP) 8912 return err; 8913 } 8914 return devlink_compat_phys_port_name_get(dev, name, len); 8915 } 8916 8917 /** 8918 * dev_get_port_parent_id - Get the device's port parent identifier 8919 * @dev: network device 8920 * @ppid: pointer to a storage for the port's parent identifier 8921 * @recurse: allow/disallow recursion to lower devices 8922 * 8923 * Get the devices's port parent identifier 8924 */ 8925 int dev_get_port_parent_id(struct net_device *dev, 8926 struct netdev_phys_item_id *ppid, 8927 bool recurse) 8928 { 8929 const struct net_device_ops *ops = dev->netdev_ops; 8930 struct netdev_phys_item_id first = { }; 8931 struct net_device *lower_dev; 8932 struct list_head *iter; 8933 int err; 8934 8935 if (ops->ndo_get_port_parent_id) { 8936 err = ops->ndo_get_port_parent_id(dev, ppid); 8937 if (err != -EOPNOTSUPP) 8938 return err; 8939 } 8940 8941 err = devlink_compat_switch_id_get(dev, ppid); 8942 if (!recurse || err != -EOPNOTSUPP) 8943 return err; 8944 8945 netdev_for_each_lower_dev(dev, lower_dev, iter) { 8946 err = dev_get_port_parent_id(lower_dev, ppid, true); 8947 if (err) 8948 break; 8949 if (!first.id_len) 8950 first = *ppid; 8951 else if (memcmp(&first, ppid, sizeof(*ppid))) 8952 return -EOPNOTSUPP; 8953 } 8954 8955 return err; 8956 } 8957 EXPORT_SYMBOL(dev_get_port_parent_id); 8958 8959 /** 8960 * netdev_port_same_parent_id - Indicate if two network devices have 8961 
* the same port parent identifier 8962 * @a: first network device 8963 * @b: second network device 8964 */ 8965 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 8966 { 8967 struct netdev_phys_item_id a_id = { }; 8968 struct netdev_phys_item_id b_id = { }; 8969 8970 if (dev_get_port_parent_id(a, &a_id, true) || 8971 dev_get_port_parent_id(b, &b_id, true)) 8972 return false; 8973 8974 return netdev_phys_item_id_same(&a_id, &b_id); 8975 } 8976 EXPORT_SYMBOL(netdev_port_same_parent_id); 8977 8978 /** 8979 * dev_change_proto_down - set carrier according to proto_down. 8980 * 8981 * @dev: device 8982 * @proto_down: new value 8983 */ 8984 int dev_change_proto_down(struct net_device *dev, bool proto_down) 8985 { 8986 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) 8987 return -EOPNOTSUPP; 8988 if (!netif_device_present(dev)) 8989 return -ENODEV; 8990 if (proto_down) 8991 netif_carrier_off(dev); 8992 else 8993 netif_carrier_on(dev); 8994 dev->proto_down = proto_down; 8995 return 0; 8996 } 8997 8998 /** 8999 * dev_change_proto_down_reason - proto down reason 9000 * 9001 * @dev: device 9002 * @mask: proto down mask 9003 * @value: proto down value 9004 */ 9005 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 9006 u32 value) 9007 { 9008 int b; 9009 9010 if (!mask) { 9011 dev->proto_down_reason = value; 9012 } else { 9013 for_each_set_bit(b, &mask, 32) { 9014 if (value & (1 << b)) 9015 dev->proto_down_reason |= BIT(b); 9016 else 9017 dev->proto_down_reason &= ~BIT(b); 9018 } 9019 } 9020 } 9021 9022 struct bpf_xdp_link { 9023 struct bpf_link link; 9024 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9025 int flags; 9026 }; 9027 9028 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9029 { 9030 if (flags & XDP_FLAGS_HW_MODE) 9031 return XDP_MODE_HW; 9032 if (flags & XDP_FLAGS_DRV_MODE) 9033 return XDP_MODE_DRV; 9034 if (flags & XDP_FLAGS_SKB_MODE) 9035 return XDP_MODE_SKB; 9036 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; 9037 } 9038 9039 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9040 { 9041 switch (mode) { 9042 case XDP_MODE_SKB: 9043 return generic_xdp_install; 9044 case XDP_MODE_DRV: 9045 case XDP_MODE_HW: 9046 return dev->netdev_ops->ndo_bpf; 9047 default: 9048 return NULL; 9049 } 9050 } 9051 9052 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9053 enum bpf_xdp_mode mode) 9054 { 9055 return dev->xdp_state[mode].link; 9056 } 9057 9058 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9059 enum bpf_xdp_mode mode) 9060 { 9061 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9062 9063 if (link) 9064 return link->link.prog; 9065 return dev->xdp_state[mode].prog; 9066 } 9067 9068 u8 dev_xdp_prog_count(struct net_device *dev) 9069 { 9070 u8 count = 0; 9071 int i; 9072 9073 for (i = 0; i < __MAX_XDP_MODE; i++) 9074 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9075 count++; 9076 return count; 9077 } 9078 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9079 9080 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9081 { 9082 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9083 9084 return prog ? 
prog->aux->id : 0; 9085 } 9086 9087 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9088 struct bpf_xdp_link *link) 9089 { 9090 dev->xdp_state[mode].link = link; 9091 dev->xdp_state[mode].prog = NULL; 9092 } 9093 9094 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9095 struct bpf_prog *prog) 9096 { 9097 dev->xdp_state[mode].link = NULL; 9098 dev->xdp_state[mode].prog = prog; 9099 } 9100 9101 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9102 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9103 u32 flags, struct bpf_prog *prog) 9104 { 9105 struct netdev_bpf xdp; 9106 int err; 9107 9108 memset(&xdp, 0, sizeof(xdp)); 9109 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9110 xdp.extack = extack; 9111 xdp.flags = flags; 9112 xdp.prog = prog; 9113 9114 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9115 * "moved" into driver), so they don't increment it on their own, but 9116 * they do decrement refcnt when program is detached or replaced. 9117 * Given net_device also owns link/prog, we need to bump refcnt here 9118 * to prevent drivers from underflowing it. 9119 */ 9120 if (prog) 9121 bpf_prog_inc(prog); 9122 err = bpf_op(dev, &xdp); 9123 if (err) { 9124 if (prog) 9125 bpf_prog_put(prog); 9126 return err; 9127 } 9128 9129 if (mode != XDP_MODE_HW) 9130 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9131 9132 return 0; 9133 } 9134 9135 static void dev_xdp_uninstall(struct net_device *dev) 9136 { 9137 struct bpf_xdp_link *link; 9138 struct bpf_prog *prog; 9139 enum bpf_xdp_mode mode; 9140 bpf_op_t bpf_op; 9141 9142 ASSERT_RTNL(); 9143 9144 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9145 prog = dev_xdp_prog(dev, mode); 9146 if (!prog) 9147 continue; 9148 9149 bpf_op = dev_xdp_bpf_op(dev, mode); 9150 if (!bpf_op) 9151 continue; 9152 9153 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9154 9155 /* auto-detach link from net device */ 9156 link = dev_xdp_link(dev, mode); 9157 if (link) 9158 link->dev = NULL; 9159 else 9160 bpf_prog_put(prog); 9161 9162 dev_xdp_set_link(dev, mode, NULL); 9163 } 9164 } 9165 9166 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9167 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9168 struct bpf_prog *old_prog, u32 flags) 9169 { 9170 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9171 struct bpf_prog *cur_prog; 9172 struct net_device *upper; 9173 struct list_head *iter; 9174 enum bpf_xdp_mode mode; 9175 bpf_op_t bpf_op; 9176 int err; 9177 9178 ASSERT_RTNL(); 9179 9180 /* either link or prog attachment, never both */ 9181 if (link && (new_prog || old_prog)) 9182 return -EINVAL; 9183 /* link supports only XDP mode flags */ 9184 if (link && (flags & ~XDP_FLAGS_MODES)) { 9185 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9186 return -EINVAL; 9187 } 9188 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9189 if (num_modes > 1) { 9190 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9191 return -EINVAL; 9192 } 9193 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9194 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9195 NL_SET_ERR_MSG(extack, 9196 "More than one program loaded, unset mode is ambiguous"); 9197 return -EINVAL; 9198 } 9199 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9200 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9201 NL_SET_ERR_MSG(extack, 
"XDP_FLAGS_REPLACE is not specified"); 9202 return -EINVAL; 9203 } 9204 9205 mode = dev_xdp_mode(dev, flags); 9206 /* can't replace attached link */ 9207 if (dev_xdp_link(dev, mode)) { 9208 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9209 return -EBUSY; 9210 } 9211 9212 /* don't allow if an upper device already has a program */ 9213 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9214 if (dev_xdp_prog_count(upper) > 0) { 9215 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9216 return -EEXIST; 9217 } 9218 } 9219 9220 cur_prog = dev_xdp_prog(dev, mode); 9221 /* can't replace attached prog with link */ 9222 if (link && cur_prog) { 9223 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9224 return -EBUSY; 9225 } 9226 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9227 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9228 return -EEXIST; 9229 } 9230 9231 /* put effective new program into new_prog */ 9232 if (link) 9233 new_prog = link->link.prog; 9234 9235 if (new_prog) { 9236 bool offload = mode == XDP_MODE_HW; 9237 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9238 ? XDP_MODE_DRV : XDP_MODE_SKB; 9239 9240 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9241 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9242 return -EBUSY; 9243 } 9244 if (!offload && dev_xdp_prog(dev, other_mode)) { 9245 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9246 return -EEXIST; 9247 } 9248 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { 9249 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); 9250 return -EINVAL; 9251 } 9252 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { 9253 NL_SET_ERR_MSG(extack, "Program bound to different device"); 9254 return -EINVAL; 9255 } 9256 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9257 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9258 return -EINVAL; 9259 } 9260 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9261 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9262 return -EINVAL; 9263 } 9264 } 9265 9266 /* don't call drivers if the effective program didn't change */ 9267 if (new_prog != cur_prog) { 9268 bpf_op = dev_xdp_bpf_op(dev, mode); 9269 if (!bpf_op) { 9270 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9271 return -EOPNOTSUPP; 9272 } 9273 9274 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9275 if (err) 9276 return err; 9277 } 9278 9279 if (link) 9280 dev_xdp_set_link(dev, mode, link); 9281 else 9282 dev_xdp_set_prog(dev, mode, new_prog); 9283 if (cur_prog) 9284 bpf_prog_put(cur_prog); 9285 9286 return 0; 9287 } 9288 9289 static int dev_xdp_attach_link(struct net_device *dev, 9290 struct netlink_ext_ack *extack, 9291 struct bpf_xdp_link *link) 9292 { 9293 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9294 } 9295 9296 static int dev_xdp_detach_link(struct net_device *dev, 9297 struct netlink_ext_ack *extack, 9298 struct bpf_xdp_link *link) 9299 { 9300 enum bpf_xdp_mode mode; 9301 bpf_op_t bpf_op; 9302 9303 ASSERT_RTNL(); 9304 9305 mode = dev_xdp_mode(dev, link->flags); 9306 if (dev_xdp_link(dev, mode) != link) 9307 return -EINVAL; 9308 9309 bpf_op = dev_xdp_bpf_op(dev, mode); 9310 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9311 dev_xdp_set_link(dev, 
mode, NULL); 9312 return 0; 9313 } 9314 9315 static void bpf_xdp_link_release(struct bpf_link *link) 9316 { 9317 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9318 9319 rtnl_lock(); 9320 9321 /* if racing with net_device's tear down, xdp_link->dev might be 9322 * already NULL, in which case link was already auto-detached 9323 */ 9324 if (xdp_link->dev) { 9325 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9326 xdp_link->dev = NULL; 9327 } 9328 9329 rtnl_unlock(); 9330 } 9331 9332 static int bpf_xdp_link_detach(struct bpf_link *link) 9333 { 9334 bpf_xdp_link_release(link); 9335 return 0; 9336 } 9337 9338 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9339 { 9340 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9341 9342 kfree(xdp_link); 9343 } 9344 9345 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9346 struct seq_file *seq) 9347 { 9348 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9349 u32 ifindex = 0; 9350 9351 rtnl_lock(); 9352 if (xdp_link->dev) 9353 ifindex = xdp_link->dev->ifindex; 9354 rtnl_unlock(); 9355 9356 seq_printf(seq, "ifindex:\t%u\n", ifindex); 9357 } 9358 9359 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9360 struct bpf_link_info *info) 9361 { 9362 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9363 u32 ifindex = 0; 9364 9365 rtnl_lock(); 9366 if (xdp_link->dev) 9367 ifindex = xdp_link->dev->ifindex; 9368 rtnl_unlock(); 9369 9370 info->xdp.ifindex = ifindex; 9371 return 0; 9372 } 9373 9374 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9375 struct bpf_prog *old_prog) 9376 { 9377 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9378 enum bpf_xdp_mode mode; 9379 bpf_op_t bpf_op; 9380 int err = 0; 9381 9382 rtnl_lock(); 9383 9384 /* link might have been auto-released already, so fail */ 9385 if (!xdp_link->dev) { 9386 err = -ENOLINK; 9387 goto out_unlock; 9388 } 9389 9390 if (old_prog && link->prog != old_prog) { 9391 err = -EPERM; 9392 goto out_unlock; 9393 } 9394 old_prog = link->prog; 9395 if (old_prog->type != new_prog->type || 9396 old_prog->expected_attach_type != new_prog->expected_attach_type) { 9397 err = -EINVAL; 9398 goto out_unlock; 9399 } 9400 9401 if (old_prog == new_prog) { 9402 /* no-op, don't disturb drivers */ 9403 bpf_prog_put(new_prog); 9404 goto out_unlock; 9405 } 9406 9407 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9408 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9409 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9410 xdp_link->flags, new_prog); 9411 if (err) 9412 goto out_unlock; 9413 9414 old_prog = xchg(&link->prog, new_prog); 9415 bpf_prog_put(old_prog); 9416 9417 out_unlock: 9418 rtnl_unlock(); 9419 return err; 9420 } 9421 9422 static const struct bpf_link_ops bpf_xdp_link_lops = { 9423 .release = bpf_xdp_link_release, 9424 .dealloc = bpf_xdp_link_dealloc, 9425 .detach = bpf_xdp_link_detach, 9426 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9427 .fill_link_info = bpf_xdp_link_fill_link_info, 9428 .update_prog = bpf_xdp_link_update, 9429 }; 9430 9431 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9432 { 9433 struct net *net = current->nsproxy->net_ns; 9434 struct bpf_link_primer link_primer; 9435 struct bpf_xdp_link *link; 9436 struct net_device *dev; 9437 int err, fd; 9438 9439 rtnl_lock(); 9440 dev = dev_get_by_index(net, 
attr->link_create.target_ifindex); 9441 if (!dev) { 9442 rtnl_unlock(); 9443 return -EINVAL; 9444 } 9445 9446 link = kzalloc(sizeof(*link), GFP_USER); 9447 if (!link) { 9448 err = -ENOMEM; 9449 goto unlock; 9450 } 9451 9452 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9453 link->dev = dev; 9454 link->flags = attr->link_create.flags; 9455 9456 err = bpf_link_prime(&link->link, &link_primer); 9457 if (err) { 9458 kfree(link); 9459 goto unlock; 9460 } 9461 9462 err = dev_xdp_attach_link(dev, NULL, link); 9463 rtnl_unlock(); 9464 9465 if (err) { 9466 link->dev = NULL; 9467 bpf_link_cleanup(&link_primer); 9468 goto out_put_dev; 9469 } 9470 9471 fd = bpf_link_settle(&link_primer); 9472 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9473 dev_put(dev); 9474 return fd; 9475 9476 unlock: 9477 rtnl_unlock(); 9478 9479 out_put_dev: 9480 dev_put(dev); 9481 return err; 9482 } 9483 9484 /** 9485 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9486 * @dev: device 9487 * @extack: netlink extended ack 9488 * @fd: new program fd or negative value to clear 9489 * @expected_fd: old program fd that userspace expects to replace or clear 9490 * @flags: xdp-related flags 9491 * 9492 * Set or clear a bpf program for a device 9493 */ 9494 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9495 int fd, int expected_fd, u32 flags) 9496 { 9497 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9498 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9499 int err; 9500 9501 ASSERT_RTNL(); 9502 9503 if (fd >= 0) { 9504 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9505 mode != XDP_MODE_SKB); 9506 if (IS_ERR(new_prog)) 9507 return PTR_ERR(new_prog); 9508 } 9509 9510 if (expected_fd >= 0) { 9511 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9512 mode != XDP_MODE_SKB); 9513 if (IS_ERR(old_prog)) { 9514 err = PTR_ERR(old_prog); 9515 old_prog = NULL; 9516 goto err_out; 9517 } 9518 } 9519 9520 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9521 9522 err_out: 9523 if (err && new_prog) 9524 bpf_prog_put(new_prog); 9525 if (old_prog) 9526 bpf_prog_put(old_prog); 9527 return err; 9528 } 9529 9530 /** 9531 * dev_new_index - allocate an ifindex 9532 * @net: the applicable net namespace 9533 * 9534 * Returns a suitable unique value for a new device interface 9535 * number. The caller must hold the rtnl semaphore or the 9536 * dev_base_lock to be sure it remains unique. 
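 *
 * For example (illustrative only): with net->ifindex at INT_MAX, the next
 * allocation wraps the candidate index back to 1 and keeps scanning upward
 * until a value not currently in use in @net is found.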
9537 */ 9538 static int dev_new_index(struct net *net) 9539 { 9540 int ifindex = net->ifindex; 9541 9542 for (;;) { 9543 if (++ifindex <= 0) 9544 ifindex = 1; 9545 if (!__dev_get_by_index(net, ifindex)) 9546 return net->ifindex = ifindex; 9547 } 9548 } 9549 9550 /* Delayed registration/unregisteration */ 9551 LIST_HEAD(net_todo_list); 9552 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9553 9554 static void net_set_todo(struct net_device *dev) 9555 { 9556 list_add_tail(&dev->todo_list, &net_todo_list); 9557 atomic_inc(&dev_net(dev)->dev_unreg_count); 9558 } 9559 9560 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9561 struct net_device *upper, netdev_features_t features) 9562 { 9563 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9564 netdev_features_t feature; 9565 int feature_bit; 9566 9567 for_each_netdev_feature(upper_disables, feature_bit) { 9568 feature = __NETIF_F_BIT(feature_bit); 9569 if (!(upper->wanted_features & feature) 9570 && (features & feature)) { 9571 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9572 &feature, upper->name); 9573 features &= ~feature; 9574 } 9575 } 9576 9577 return features; 9578 } 9579 9580 static void netdev_sync_lower_features(struct net_device *upper, 9581 struct net_device *lower, netdev_features_t features) 9582 { 9583 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9584 netdev_features_t feature; 9585 int feature_bit; 9586 9587 for_each_netdev_feature(upper_disables, feature_bit) { 9588 feature = __NETIF_F_BIT(feature_bit); 9589 if (!(features & feature) && (lower->features & feature)) { 9590 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9591 &feature, lower->name); 9592 lower->wanted_features &= ~feature; 9593 __netdev_update_features(lower); 9594 9595 if (unlikely(lower->features & feature)) 9596 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9597 &feature, lower->name); 9598 else 9599 netdev_features_change(lower); 9600 } 9601 } 9602 } 9603 9604 static netdev_features_t netdev_fix_features(struct net_device *dev, 9605 netdev_features_t features) 9606 { 9607 /* Fix illegal checksum combinations */ 9608 if ((features & NETIF_F_HW_CSUM) && 9609 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9610 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9611 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9612 } 9613 9614 /* TSO requires that SG is present as well. */ 9615 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9616 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9617 features &= ~NETIF_F_ALL_TSO; 9618 } 9619 9620 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9621 !(features & NETIF_F_IP_CSUM)) { 9622 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9623 features &= ~NETIF_F_TSO; 9624 features &= ~NETIF_F_TSO_ECN; 9625 } 9626 9627 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9628 !(features & NETIF_F_IPV6_CSUM)) { 9629 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9630 features &= ~NETIF_F_TSO6; 9631 } 9632 9633 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9634 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9635 features &= ~NETIF_F_TSO_MANGLEID; 9636 9637 /* TSO ECN requires that TSO is present as well. */ 9638 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9639 features &= ~NETIF_F_TSO_ECN; 9640 9641 /* Software GSO depends on SG. 
*/ 9642 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9643 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9644 features &= ~NETIF_F_GSO; 9645 } 9646 9647 /* GSO partial features require GSO partial be set */ 9648 if ((features & dev->gso_partial_features) && 9649 !(features & NETIF_F_GSO_PARTIAL)) { 9650 netdev_dbg(dev, 9651 "Dropping partially supported GSO features since no GSO partial.\n"); 9652 features &= ~dev->gso_partial_features; 9653 } 9654 9655 if (!(features & NETIF_F_RXCSUM)) { 9656 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9657 * successfully merged by hardware must also have the 9658 * checksum verified by hardware. If the user does not 9659 * want to enable RXCSUM, logically, we should disable GRO_HW. 9660 */ 9661 if (features & NETIF_F_GRO_HW) { 9662 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9663 features &= ~NETIF_F_GRO_HW; 9664 } 9665 } 9666 9667 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9668 if (features & NETIF_F_RXFCS) { 9669 if (features & NETIF_F_LRO) { 9670 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9671 features &= ~NETIF_F_LRO; 9672 } 9673 9674 if (features & NETIF_F_GRO_HW) { 9675 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9676 features &= ~NETIF_F_GRO_HW; 9677 } 9678 } 9679 9680 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 9681 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 9682 features &= ~NETIF_F_LRO; 9683 } 9684 9685 if (features & NETIF_F_HW_TLS_TX) { 9686 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9687 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9688 bool hw_csum = features & NETIF_F_HW_CSUM; 9689 9690 if (!ip_csum && !hw_csum) { 9691 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9692 features &= ~NETIF_F_HW_TLS_TX; 9693 } 9694 } 9695 9696 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9697 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9698 features &= ~NETIF_F_HW_TLS_RX; 9699 } 9700 9701 return features; 9702 } 9703 9704 int __netdev_update_features(struct net_device *dev) 9705 { 9706 struct net_device *upper, *lower; 9707 netdev_features_t features; 9708 struct list_head *iter; 9709 int err = -1; 9710 9711 ASSERT_RTNL(); 9712 9713 features = netdev_get_wanted_features(dev); 9714 9715 if (dev->netdev_ops->ndo_fix_features) 9716 features = dev->netdev_ops->ndo_fix_features(dev, features); 9717 9718 /* driver might be less strict about feature dependencies */ 9719 features = netdev_fix_features(dev, features); 9720 9721 /* some features can't be enabled if they're off on an upper device */ 9722 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9723 features = netdev_sync_upper_features(dev, upper, features); 9724 9725 if (dev->features == features) 9726 goto sync_lower; 9727 9728 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9729 &dev->features, &features); 9730 9731 if (dev->netdev_ops->ndo_set_features) 9732 err = dev->netdev_ops->ndo_set_features(dev, features); 9733 else 9734 err = 0; 9735 9736 if (unlikely(err < 0)) { 9737 netdev_err(dev, 9738 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9739 err, &features, &dev->features); 9740 /* return non-0 since some features might have changed and 9741 * it's better to fire a spurious notification than miss it 9742 */ 9743 return -1; 9744 } 9745 9746 sync_lower: 9747 /* some features must be disabled 
on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate the dev->features set and send notifications if it
 * has changed. Should be called after driver- or hardware-dependent
 * conditions that influence the features might have changed.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate the dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if dev->vlan_features might also have
 * changed, so that the changes can be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
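 *
 * Illustrative sketch, not taken from this file: a VLAN or similar stacked
 * driver is assumed to call this from its NETDEV_CHANGE notifier so the
 * upper device mirrors the carrier/dormant/testing state of the real
 * device:
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);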
9836 */ 9837 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9838 struct net_device *dev) 9839 { 9840 if (rootdev->operstate == IF_OPER_DORMANT) 9841 netif_dormant_on(dev); 9842 else 9843 netif_dormant_off(dev); 9844 9845 if (rootdev->operstate == IF_OPER_TESTING) 9846 netif_testing_on(dev); 9847 else 9848 netif_testing_off(dev); 9849 9850 if (netif_carrier_ok(rootdev)) 9851 netif_carrier_on(dev); 9852 else 9853 netif_carrier_off(dev); 9854 } 9855 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9856 9857 static int netif_alloc_rx_queues(struct net_device *dev) 9858 { 9859 unsigned int i, count = dev->num_rx_queues; 9860 struct netdev_rx_queue *rx; 9861 size_t sz = count * sizeof(*rx); 9862 int err = 0; 9863 9864 BUG_ON(count < 1); 9865 9866 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9867 if (!rx) 9868 return -ENOMEM; 9869 9870 dev->_rx = rx; 9871 9872 for (i = 0; i < count; i++) { 9873 rx[i].dev = dev; 9874 9875 /* XDP RX-queue setup */ 9876 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 9877 if (err < 0) 9878 goto err_rxq_info; 9879 } 9880 return 0; 9881 9882 err_rxq_info: 9883 /* Rollback successful reg's and free other resources */ 9884 while (i--) 9885 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9886 kvfree(dev->_rx); 9887 dev->_rx = NULL; 9888 return err; 9889 } 9890 9891 static void netif_free_rx_queues(struct net_device *dev) 9892 { 9893 unsigned int i, count = dev->num_rx_queues; 9894 9895 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9896 if (!dev->_rx) 9897 return; 9898 9899 for (i = 0; i < count; i++) 9900 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9901 9902 kvfree(dev->_rx); 9903 } 9904 9905 static void netdev_init_one_queue(struct net_device *dev, 9906 struct netdev_queue *queue, void *_unused) 9907 { 9908 /* Initialize queue lock */ 9909 spin_lock_init(&queue->_xmit_lock); 9910 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 9911 queue->xmit_lock_owner = -1; 9912 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 9913 queue->dev = dev; 9914 #ifdef CONFIG_BQL 9915 dql_init(&queue->dql, HZ); 9916 #endif 9917 } 9918 9919 static void netif_free_tx_queues(struct net_device *dev) 9920 { 9921 kvfree(dev->_tx); 9922 } 9923 9924 static int netif_alloc_netdev_queues(struct net_device *dev) 9925 { 9926 unsigned int count = dev->num_tx_queues; 9927 struct netdev_queue *tx; 9928 size_t sz = count * sizeof(*tx); 9929 9930 if (count < 1 || count > 0xffff) 9931 return -EINVAL; 9932 9933 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9934 if (!tx) 9935 return -ENOMEM; 9936 9937 dev->_tx = tx; 9938 9939 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 9940 spin_lock_init(&dev->tx_global_lock); 9941 9942 return 0; 9943 } 9944 9945 void netif_tx_stop_all_queues(struct net_device *dev) 9946 { 9947 unsigned int i; 9948 9949 for (i = 0; i < dev->num_tx_queues; i++) { 9950 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 9951 9952 netif_tx_stop_queue(txq); 9953 } 9954 } 9955 EXPORT_SYMBOL(netif_tx_stop_all_queues); 9956 9957 /** 9958 * register_netdevice() - register a network device 9959 * @dev: device to register 9960 * 9961 * Take a prepared network device structure and make it externally accessible. 9962 * A %NETDEV_REGISTER message is sent to the netdev notifier chain. 9963 * Callers must hold the rtnl lock - you may want register_netdev() 9964 * instead of this. 
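 *
 * A minimal calling sketch (illustrative only) for code that already
 * manages the RTNL lock itself:
 *
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();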
9965 */ 9966 int register_netdevice(struct net_device *dev) 9967 { 9968 int ret; 9969 struct net *net = dev_net(dev); 9970 9971 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 9972 NETDEV_FEATURE_COUNT); 9973 BUG_ON(dev_boot_phase); 9974 ASSERT_RTNL(); 9975 9976 might_sleep(); 9977 9978 /* When net_device's are persistent, this will be fatal. */ 9979 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 9980 BUG_ON(!net); 9981 9982 ret = ethtool_check_ops(dev->ethtool_ops); 9983 if (ret) 9984 return ret; 9985 9986 spin_lock_init(&dev->addr_list_lock); 9987 netdev_set_addr_lockdep_class(dev); 9988 9989 ret = dev_get_valid_name(net, dev, dev->name); 9990 if (ret < 0) 9991 goto out; 9992 9993 ret = -ENOMEM; 9994 dev->name_node = netdev_name_node_head_alloc(dev); 9995 if (!dev->name_node) 9996 goto out; 9997 9998 /* Init, if this function is available */ 9999 if (dev->netdev_ops->ndo_init) { 10000 ret = dev->netdev_ops->ndo_init(dev); 10001 if (ret) { 10002 if (ret > 0) 10003 ret = -EIO; 10004 goto err_free_name; 10005 } 10006 } 10007 10008 if (((dev->hw_features | dev->features) & 10009 NETIF_F_HW_VLAN_CTAG_FILTER) && 10010 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10011 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10012 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10013 ret = -EINVAL; 10014 goto err_uninit; 10015 } 10016 10017 ret = -EBUSY; 10018 if (!dev->ifindex) 10019 dev->ifindex = dev_new_index(net); 10020 else if (__dev_get_by_index(net, dev->ifindex)) 10021 goto err_uninit; 10022 10023 /* Transfer changeable features to wanted_features and enable 10024 * software offloads (GSO and GRO). 10025 */ 10026 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10027 dev->features |= NETIF_F_SOFT_FEATURES; 10028 10029 if (dev->udp_tunnel_nic_info) { 10030 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10031 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10032 } 10033 10034 dev->wanted_features = dev->features & dev->hw_features; 10035 10036 if (!(dev->flags & IFF_LOOPBACK)) 10037 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10038 10039 /* If IPv4 TCP segmentation offload is supported we should also 10040 * allow the device to enable segmenting the frame with the option 10041 * of ignoring a static IP ID value. This doesn't enable the 10042 * feature itself but allows the user to enable it later. 10043 */ 10044 if (dev->hw_features & NETIF_F_TSO) 10045 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10046 if (dev->vlan_features & NETIF_F_TSO) 10047 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10048 if (dev->mpls_features & NETIF_F_TSO) 10049 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10050 if (dev->hw_enc_features & NETIF_F_TSO) 10051 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10052 10053 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10054 */ 10055 dev->vlan_features |= NETIF_F_HIGHDMA; 10056 10057 /* Make NETIF_F_SG inheritable to tunnel devices. 10058 */ 10059 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10060 10061 /* Make NETIF_F_SG inheritable to MPLS. 10062 */ 10063 dev->mpls_features |= NETIF_F_SG; 10064 10065 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 10066 ret = notifier_to_errno(ret); 10067 if (ret) 10068 goto err_uninit; 10069 10070 ret = netdev_register_kobject(dev); 10071 write_lock(&dev_base_lock); 10072 dev->reg_state = ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED;
10073 write_unlock(&dev_base_lock);
10074 if (ret)
10075 goto err_uninit_notify;
10076
10077 __netdev_update_features(dev);
10078
10079 /*
10080 * Default initial state at registration is that the
10081 * device is present.
10082 */
10083
10084 set_bit(__LINK_STATE_PRESENT, &dev->state);
10085
10086 linkwatch_init_dev(dev);
10087
10088 dev_init_scheduler(dev);
10089
10090 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10091 list_netdevice(dev);
10092
10093 add_device_randomness(dev->dev_addr, dev->addr_len);
10094
10095 /* If the device has a permanent device address, the driver should
10096 * set dev_addr and also addr_assign_type should be set to
10097 * NET_ADDR_PERM (default value).
10098 */
10099 if (dev->addr_assign_type == NET_ADDR_PERM)
10100 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10101
10102 /* Notify protocols, that a new device appeared. */
10103 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10104 ret = notifier_to_errno(ret);
10105 if (ret) {
10106 /* Expect explicit free_netdev() on failure */
10107 dev->needs_free_netdev = false;
10108 unregister_netdevice_queue(dev, NULL);
10109 goto out;
10110 }
10111 /*
10112 * Prevent userspace races by waiting until the network
10113 * device is fully set up before sending notifications.
10114 */
10115 if (!dev->rtnl_link_ops ||
10116 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10117 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10118
10119 out:
10120 return ret;
10121
10122 err_uninit_notify:
10123 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10124 err_uninit:
10125 if (dev->netdev_ops->ndo_uninit)
10126 dev->netdev_ops->ndo_uninit(dev);
10127 if (dev->priv_destructor)
10128 dev->priv_destructor(dev);
10129 err_free_name:
10130 netdev_name_node_free(dev->name_node);
10131 goto out;
10132 }
10133 EXPORT_SYMBOL(register_netdevice);
10134
10135 /**
10136 * init_dummy_netdev - init a dummy network device for NAPI
10137 * @dev: device to init
10138 *
10139 * This takes a network device structure and initializes the minimum
10140 * amount of fields so it can be used to schedule NAPI polls without
10141 * registering a full blown interface. This is to be used by drivers
10142 * that need to tie several hardware interfaces to a single NAPI
10143 * poll scheduler due to HW limitations.
10144 */
10145 int init_dummy_netdev(struct net_device *dev)
10146 {
10147 /* Clear everything. Note we don't initialize spinlocks
10148 * as they aren't supposed to be taken by any of the
10149 * NAPI code and this dummy netdev is supposed to be
10150 * only ever used for NAPI polls.
10151 */
10152 memset(dev, 0, sizeof(struct net_device));
10153
10154 /* make sure we BUG if trying to hit standard
10155 * register/unregister code path
10156 */
10157 dev->reg_state = NETREG_DUMMY;
10158
10159 /* NAPI wants this */
10160 INIT_LIST_HEAD(&dev->napi_list);
10161
10162 /* a dummy interface is started by default */
10163 set_bit(__LINK_STATE_PRESENT, &dev->state);
10164 set_bit(__LINK_STATE_START, &dev->state);
10165
10166 /* napi_busy_loop stats accounting wants this */
10167 dev_net_set(dev, &init_net);
10168
10169 /* Note : We don't allocate pcpu_refcnt for dummy devices,
10170 * because users of this 'device' don't need to change
10171 * its refcount.
10172 */ 10173 10174 return 0; 10175 } 10176 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10177 10178 10179 /** 10180 * register_netdev - register a network device 10181 * @dev: device to register 10182 * 10183 * Take a completed network device structure and add it to the kernel 10184 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10185 * chain. 0 is returned on success. A negative errno code is returned 10186 * on a failure to set up the device, or if the name is a duplicate. 10187 * 10188 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10189 * and expands the device name if you passed a format string to 10190 * alloc_netdev. 10191 */ 10192 int register_netdev(struct net_device *dev) 10193 { 10194 int err; 10195 10196 if (rtnl_lock_killable()) 10197 return -EINTR; 10198 err = register_netdevice(dev); 10199 rtnl_unlock(); 10200 return err; 10201 } 10202 EXPORT_SYMBOL(register_netdev); 10203 10204 int netdev_refcnt_read(const struct net_device *dev) 10205 { 10206 #ifdef CONFIG_PCPU_DEV_REFCNT 10207 int i, refcnt = 0; 10208 10209 for_each_possible_cpu(i) 10210 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10211 return refcnt; 10212 #else 10213 return refcount_read(&dev->dev_refcnt); 10214 #endif 10215 } 10216 EXPORT_SYMBOL(netdev_refcnt_read); 10217 10218 int netdev_unregister_timeout_secs __read_mostly = 10; 10219 10220 #define WAIT_REFS_MIN_MSECS 1 10221 #define WAIT_REFS_MAX_MSECS 250 10222 /** 10223 * netdev_wait_allrefs_any - wait until all references are gone. 10224 * @list: list of net_devices to wait on 10225 * 10226 * This is called when unregistering network devices. 10227 * 10228 * Any protocol or device that holds a reference should register 10229 * for netdevice notification, and cleanup and put back the 10230 * reference if they receive an UNREGISTER event. 10231 * We can get stuck here if buggy protocols don't correctly 10232 * call dev_put. 10233 */ 10234 static struct net_device *netdev_wait_allrefs_any(struct list_head *list) 10235 { 10236 unsigned long rebroadcast_time, warning_time; 10237 struct net_device *dev; 10238 int wait = 0; 10239 10240 rebroadcast_time = warning_time = jiffies; 10241 10242 list_for_each_entry(dev, list, todo_list) 10243 if (netdev_refcnt_read(dev) == 1) 10244 return dev; 10245 10246 while (true) { 10247 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10248 rtnl_lock(); 10249 10250 /* Rebroadcast unregister notification */ 10251 list_for_each_entry(dev, list, todo_list) 10252 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10253 10254 __rtnl_unlock(); 10255 rcu_barrier(); 10256 rtnl_lock(); 10257 10258 list_for_each_entry(dev, list, todo_list) 10259 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10260 &dev->state)) { 10261 /* We must not have linkwatch events 10262 * pending on unregister. If this 10263 * happens, we simply run the queue 10264 * unscheduled, resulting in a noop 10265 * for this device. 
10266 */ 10267 linkwatch_run_queue(); 10268 break; 10269 } 10270 10271 __rtnl_unlock(); 10272 10273 rebroadcast_time = jiffies; 10274 } 10275 10276 if (!wait) { 10277 rcu_barrier(); 10278 wait = WAIT_REFS_MIN_MSECS; 10279 } else { 10280 msleep(wait); 10281 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10282 } 10283 10284 list_for_each_entry(dev, list, todo_list) 10285 if (netdev_refcnt_read(dev) == 1) 10286 return dev; 10287 10288 if (time_after(jiffies, warning_time + 10289 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { 10290 list_for_each_entry(dev, list, todo_list) { 10291 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10292 dev->name, netdev_refcnt_read(dev)); 10293 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 10294 } 10295 10296 warning_time = jiffies; 10297 } 10298 } 10299 } 10300 10301 /* The sequence is: 10302 * 10303 * rtnl_lock(); 10304 * ... 10305 * register_netdevice(x1); 10306 * register_netdevice(x2); 10307 * ... 10308 * unregister_netdevice(y1); 10309 * unregister_netdevice(y2); 10310 * ... 10311 * rtnl_unlock(); 10312 * free_netdev(y1); 10313 * free_netdev(y2); 10314 * 10315 * We are invoked by rtnl_unlock(). 10316 * This allows us to deal with problems: 10317 * 1) We can delete sysfs objects which invoke hotplug 10318 * without deadlocking with linkwatch via keventd. 10319 * 2) Since we run with the RTNL semaphore not held, we can sleep 10320 * safely in order to wait for the netdev refcnt to drop to zero. 10321 * 10322 * We must not return until all unregister events added during 10323 * the interval the lock was held have been completed. 10324 */ 10325 void netdev_run_todo(void) 10326 { 10327 struct net_device *dev, *tmp; 10328 struct list_head list; 10329 #ifdef CONFIG_LOCKDEP 10330 struct list_head unlink_list; 10331 10332 list_replace_init(&net_unlink_list, &unlink_list); 10333 10334 while (!list_empty(&unlink_list)) { 10335 struct net_device *dev = list_first_entry(&unlink_list, 10336 struct net_device, 10337 unlink_list); 10338 list_del_init(&dev->unlink_list); 10339 dev->nested_level = dev->lower_level - 1; 10340 } 10341 #endif 10342 10343 /* Snapshot list, allow later requests */ 10344 list_replace_init(&net_todo_list, &list); 10345 10346 __rtnl_unlock(); 10347 10348 /* Wait for rcu callbacks to finish before next phase */ 10349 if (!list_empty(&list)) 10350 rcu_barrier(); 10351 10352 list_for_each_entry_safe(dev, tmp, &list, todo_list) { 10353 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10354 netdev_WARN(dev, "run_todo but not unregistering\n"); 10355 list_del(&dev->todo_list); 10356 continue; 10357 } 10358 10359 write_lock(&dev_base_lock); 10360 dev->reg_state = NETREG_UNREGISTERED; 10361 write_unlock(&dev_base_lock); 10362 linkwatch_forget_dev(dev); 10363 } 10364 10365 while (!list_empty(&list)) { 10366 dev = netdev_wait_allrefs_any(&list); 10367 list_del(&dev->todo_list); 10368 10369 /* paranoia */ 10370 BUG_ON(netdev_refcnt_read(dev) != 1); 10371 BUG_ON(!list_empty(&dev->ptype_all)); 10372 BUG_ON(!list_empty(&dev->ptype_specific)); 10373 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10374 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10375 10376 if (dev->priv_destructor) 10377 dev->priv_destructor(dev); 10378 if (dev->needs_free_netdev) 10379 free_netdev(dev); 10380 10381 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count)) 10382 wake_up(&netdev_unregistering_wq); 10383 10384 /* Free network device */ 10385 kobject_put(&dev->dev.kobj); 10386 } 10387 } 10388 10389 /* Convert net_device_stats to 
rtnl_link_stats64. rtnl_link_stats64 has 10390 * all the same fields in the same order as net_device_stats, with only 10391 * the type differing, but rtnl_link_stats64 may have additional fields 10392 * at the end for newer counters. 10393 */ 10394 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10395 const struct net_device_stats *netdev_stats) 10396 { 10397 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); 10398 const atomic_long_t *src = (atomic_long_t *)netdev_stats; 10399 u64 *dst = (u64 *)stats64; 10400 10401 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10402 for (i = 0; i < n; i++) 10403 dst[i] = (unsigned long)atomic_long_read(&src[i]); 10404 /* zero out counters that only exist in rtnl_link_stats64 */ 10405 memset((char *)stats64 + n * sizeof(u64), 0, 10406 sizeof(*stats64) - n * sizeof(u64)); 10407 } 10408 EXPORT_SYMBOL(netdev_stats_to_stats64); 10409 10410 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev) 10411 { 10412 struct net_device_core_stats __percpu *p; 10413 10414 p = alloc_percpu_gfp(struct net_device_core_stats, 10415 GFP_ATOMIC | __GFP_NOWARN); 10416 10417 if (p && cmpxchg(&dev->core_stats, NULL, p)) 10418 free_percpu(p); 10419 10420 /* This READ_ONCE() pairs with the cmpxchg() above */ 10421 return READ_ONCE(dev->core_stats); 10422 } 10423 EXPORT_SYMBOL(netdev_core_stats_alloc); 10424 10425 /** 10426 * dev_get_stats - get network device statistics 10427 * @dev: device to get statistics from 10428 * @storage: place to store stats 10429 * 10430 * Get network statistics from device. Return @storage. 10431 * The device driver may provide its own method by setting 10432 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10433 * otherwise the internal statistics structure is used. 10434 */ 10435 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10436 struct rtnl_link_stats64 *storage) 10437 { 10438 const struct net_device_ops *ops = dev->netdev_ops; 10439 const struct net_device_core_stats __percpu *p; 10440 10441 if (ops->ndo_get_stats64) { 10442 memset(storage, 0, sizeof(*storage)); 10443 ops->ndo_get_stats64(dev, storage); 10444 } else if (ops->ndo_get_stats) { 10445 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10446 } else { 10447 netdev_stats_to_stats64(storage, &dev->stats); 10448 } 10449 10450 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 10451 p = READ_ONCE(dev->core_stats); 10452 if (p) { 10453 const struct net_device_core_stats *core_stats; 10454 int i; 10455 10456 for_each_possible_cpu(i) { 10457 core_stats = per_cpu_ptr(p, i); 10458 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); 10459 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); 10460 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); 10461 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); 10462 } 10463 } 10464 return storage; 10465 } 10466 EXPORT_SYMBOL(dev_get_stats); 10467 10468 /** 10469 * dev_fetch_sw_netstats - get per-cpu network device statistics 10470 * @s: place to store stats 10471 * @netstats: per-cpu network stats to read from 10472 * 10473 * Read per-cpu network statistics and populate the related fields in @s. 
10474 */ 10475 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10476 const struct pcpu_sw_netstats __percpu *netstats) 10477 { 10478 int cpu; 10479 10480 for_each_possible_cpu(cpu) { 10481 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 10482 const struct pcpu_sw_netstats *stats; 10483 unsigned int start; 10484 10485 stats = per_cpu_ptr(netstats, cpu); 10486 do { 10487 start = u64_stats_fetch_begin(&stats->syncp); 10488 rx_packets = u64_stats_read(&stats->rx_packets); 10489 rx_bytes = u64_stats_read(&stats->rx_bytes); 10490 tx_packets = u64_stats_read(&stats->tx_packets); 10491 tx_bytes = u64_stats_read(&stats->tx_bytes); 10492 } while (u64_stats_fetch_retry(&stats->syncp, start)); 10493 10494 s->rx_packets += rx_packets; 10495 s->rx_bytes += rx_bytes; 10496 s->tx_packets += tx_packets; 10497 s->tx_bytes += tx_bytes; 10498 } 10499 } 10500 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10501 10502 /** 10503 * dev_get_tstats64 - ndo_get_stats64 implementation 10504 * @dev: device to get statistics from 10505 * @s: place to store stats 10506 * 10507 * Populate @s from dev->stats and dev->tstats. Can be used as 10508 * ndo_get_stats64() callback. 10509 */ 10510 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10511 { 10512 netdev_stats_to_stats64(s, &dev->stats); 10513 dev_fetch_sw_netstats(s, dev->tstats); 10514 } 10515 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10516 10517 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10518 { 10519 struct netdev_queue *queue = dev_ingress_queue(dev); 10520 10521 #ifdef CONFIG_NET_CLS_ACT 10522 if (queue) 10523 return queue; 10524 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10525 if (!queue) 10526 return NULL; 10527 netdev_init_one_queue(dev, queue, NULL); 10528 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10529 queue->qdisc_sleeping = &noop_qdisc; 10530 rcu_assign_pointer(dev->ingress_queue, queue); 10531 #endif 10532 return queue; 10533 } 10534 10535 static const struct ethtool_ops default_ethtool_ops; 10536 10537 void netdev_set_default_ethtool_ops(struct net_device *dev, 10538 const struct ethtool_ops *ops) 10539 { 10540 if (dev->ethtool_ops == &default_ethtool_ops) 10541 dev->ethtool_ops = ops; 10542 } 10543 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10544 10545 /** 10546 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default 10547 * @dev: netdev to enable the IRQ coalescing on 10548 * 10549 * Sets a conservative default for SW IRQ coalescing. Users can use 10550 * sysfs attributes to override the default values. 10551 */ 10552 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) 10553 { 10554 WARN_ON(dev->reg_state == NETREG_REGISTERED); 10555 10556 dev->gro_flush_timeout = 20000; 10557 dev->napi_defer_hard_irqs = 1; 10558 } 10559 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); 10560 10561 void netdev_freemem(struct net_device *dev) 10562 { 10563 char *addr = (char *)dev - dev->padded; 10564 10565 kvfree(addr); 10566 } 10567 10568 /** 10569 * alloc_netdev_mqs - allocate network device 10570 * @sizeof_priv: size of private data to allocate space for 10571 * @name: device name format string 10572 * @name_assign_type: origin of device name 10573 * @setup: callback to initialize device 10574 * @txqs: the number of TX subqueues to allocate 10575 * @rxqs: the number of RX subqueues to allocate 10576 * 10577 * Allocates a struct net_device with private data area for driver use 10578 * and performs basic initialization. 
Also allocates subqueue structs 10579 * for each queue on the device. 10580 */ 10581 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10582 unsigned char name_assign_type, 10583 void (*setup)(struct net_device *), 10584 unsigned int txqs, unsigned int rxqs) 10585 { 10586 struct net_device *dev; 10587 unsigned int alloc_size; 10588 struct net_device *p; 10589 10590 BUG_ON(strlen(name) >= sizeof(dev->name)); 10591 10592 if (txqs < 1) { 10593 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10594 return NULL; 10595 } 10596 10597 if (rxqs < 1) { 10598 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10599 return NULL; 10600 } 10601 10602 alloc_size = sizeof(struct net_device); 10603 if (sizeof_priv) { 10604 /* ensure 32-byte alignment of private area */ 10605 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10606 alloc_size += sizeof_priv; 10607 } 10608 /* ensure 32-byte alignment of whole construct */ 10609 alloc_size += NETDEV_ALIGN - 1; 10610 10611 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10612 if (!p) 10613 return NULL; 10614 10615 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10616 dev->padded = (char *)dev - (char *)p; 10617 10618 ref_tracker_dir_init(&dev->refcnt_tracker, 128); 10619 #ifdef CONFIG_PCPU_DEV_REFCNT 10620 dev->pcpu_refcnt = alloc_percpu(int); 10621 if (!dev->pcpu_refcnt) 10622 goto free_dev; 10623 __dev_hold(dev); 10624 #else 10625 refcount_set(&dev->dev_refcnt, 1); 10626 #endif 10627 10628 if (dev_addr_init(dev)) 10629 goto free_pcpu; 10630 10631 dev_mc_init(dev); 10632 dev_uc_init(dev); 10633 10634 dev_net_set(dev, &init_net); 10635 10636 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; 10637 dev->gso_max_segs = GSO_MAX_SEGS; 10638 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; 10639 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; 10640 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; 10641 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; 10642 dev->tso_max_segs = TSO_MAX_SEGS; 10643 dev->upper_level = 1; 10644 dev->lower_level = 1; 10645 #ifdef CONFIG_LOCKDEP 10646 dev->nested_level = 0; 10647 INIT_LIST_HEAD(&dev->unlink_list); 10648 #endif 10649 10650 INIT_LIST_HEAD(&dev->napi_list); 10651 INIT_LIST_HEAD(&dev->unreg_list); 10652 INIT_LIST_HEAD(&dev->close_list); 10653 INIT_LIST_HEAD(&dev->link_watch_list); 10654 INIT_LIST_HEAD(&dev->adj_list.upper); 10655 INIT_LIST_HEAD(&dev->adj_list.lower); 10656 INIT_LIST_HEAD(&dev->ptype_all); 10657 INIT_LIST_HEAD(&dev->ptype_specific); 10658 INIT_LIST_HEAD(&dev->net_notifier_list); 10659 #ifdef CONFIG_NET_SCHED 10660 hash_init(dev->qdisc_hash); 10661 #endif 10662 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10663 setup(dev); 10664 10665 if (!dev->tx_queue_len) { 10666 dev->priv_flags |= IFF_NO_QUEUE; 10667 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10668 } 10669 10670 dev->num_tx_queues = txqs; 10671 dev->real_num_tx_queues = txqs; 10672 if (netif_alloc_netdev_queues(dev)) 10673 goto free_all; 10674 10675 dev->num_rx_queues = rxqs; 10676 dev->real_num_rx_queues = rxqs; 10677 if (netif_alloc_rx_queues(dev)) 10678 goto free_all; 10679 10680 strcpy(dev->name, name); 10681 dev->name_assign_type = name_assign_type; 10682 dev->group = INIT_NETDEV_GROUP; 10683 if (!dev->ethtool_ops) 10684 dev->ethtool_ops = &default_ethtool_ops; 10685 10686 nf_hook_netdev_init(dev); 10687 10688 return dev; 10689 10690 free_all: 10691 free_netdev(dev); 10692 return NULL; 10693 10694 free_pcpu: 10695 #ifdef CONFIG_PCPU_DEV_REFCNT 10696 free_percpu(dev->pcpu_refcnt); 10697 free_dev: 
10698 #endif 10699 netdev_freemem(dev); 10700 return NULL; 10701 } 10702 EXPORT_SYMBOL(alloc_netdev_mqs); 10703 10704 /** 10705 * free_netdev - free network device 10706 * @dev: device 10707 * 10708 * This function does the last stage of destroying an allocated device 10709 * interface. The reference to the device object is released. If this 10710 * is the last reference then it will be freed.Must be called in process 10711 * context. 10712 */ 10713 void free_netdev(struct net_device *dev) 10714 { 10715 struct napi_struct *p, *n; 10716 10717 might_sleep(); 10718 10719 /* When called immediately after register_netdevice() failed the unwind 10720 * handling may still be dismantling the device. Handle that case by 10721 * deferring the free. 10722 */ 10723 if (dev->reg_state == NETREG_UNREGISTERING) { 10724 ASSERT_RTNL(); 10725 dev->needs_free_netdev = true; 10726 return; 10727 } 10728 10729 netif_free_tx_queues(dev); 10730 netif_free_rx_queues(dev); 10731 10732 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 10733 10734 /* Flush device addresses */ 10735 dev_addr_flush(dev); 10736 10737 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 10738 netif_napi_del(p); 10739 10740 ref_tracker_dir_exit(&dev->refcnt_tracker); 10741 #ifdef CONFIG_PCPU_DEV_REFCNT 10742 free_percpu(dev->pcpu_refcnt); 10743 dev->pcpu_refcnt = NULL; 10744 #endif 10745 free_percpu(dev->core_stats); 10746 dev->core_stats = NULL; 10747 free_percpu(dev->xdp_bulkq); 10748 dev->xdp_bulkq = NULL; 10749 10750 /* Compatibility with error handling in drivers */ 10751 if (dev->reg_state == NETREG_UNINITIALIZED) { 10752 netdev_freemem(dev); 10753 return; 10754 } 10755 10756 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 10757 dev->reg_state = NETREG_RELEASED; 10758 10759 /* will free via device release */ 10760 put_device(&dev->dev); 10761 } 10762 EXPORT_SYMBOL(free_netdev); 10763 10764 /** 10765 * synchronize_net - Synchronize with packet receive processing 10766 * 10767 * Wait for packets currently being received to be done. 10768 * Does not block later packets from starting. 10769 */ 10770 void synchronize_net(void) 10771 { 10772 might_sleep(); 10773 if (rtnl_is_locked()) 10774 synchronize_rcu_expedited(); 10775 else 10776 synchronize_rcu(); 10777 } 10778 EXPORT_SYMBOL(synchronize_net); 10779 10780 /** 10781 * unregister_netdevice_queue - remove device from the kernel 10782 * @dev: device 10783 * @head: list 10784 * 10785 * This function shuts down a device interface and removes it 10786 * from the kernel tables. 10787 * If head not NULL, device is queued to be unregistered later. 10788 * 10789 * Callers must hold the rtnl semaphore. You may want 10790 * unregister_netdev() instead of this. 
10791 */ 10792 10793 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 10794 { 10795 ASSERT_RTNL(); 10796 10797 if (head) { 10798 list_move_tail(&dev->unreg_list, head); 10799 } else { 10800 LIST_HEAD(single); 10801 10802 list_add(&dev->unreg_list, &single); 10803 unregister_netdevice_many(&single); 10804 } 10805 } 10806 EXPORT_SYMBOL(unregister_netdevice_queue); 10807 10808 void unregister_netdevice_many_notify(struct list_head *head, 10809 u32 portid, const struct nlmsghdr *nlh) 10810 { 10811 struct net_device *dev, *tmp; 10812 LIST_HEAD(close_head); 10813 10814 BUG_ON(dev_boot_phase); 10815 ASSERT_RTNL(); 10816 10817 if (list_empty(head)) 10818 return; 10819 10820 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 10821 /* Some devices call without registering 10822 * for initialization unwind. Remove those 10823 * devices and proceed with the remaining. 10824 */ 10825 if (dev->reg_state == NETREG_UNINITIALIZED) { 10826 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 10827 dev->name, dev); 10828 10829 WARN_ON(1); 10830 list_del(&dev->unreg_list); 10831 continue; 10832 } 10833 dev->dismantle = true; 10834 BUG_ON(dev->reg_state != NETREG_REGISTERED); 10835 } 10836 10837 /* If device is running, close it first. */ 10838 list_for_each_entry(dev, head, unreg_list) 10839 list_add_tail(&dev->close_list, &close_head); 10840 dev_close_many(&close_head, true); 10841 10842 list_for_each_entry(dev, head, unreg_list) { 10843 /* And unlink it from device chain. */ 10844 write_lock(&dev_base_lock); 10845 unlist_netdevice(dev, false); 10846 dev->reg_state = NETREG_UNREGISTERING; 10847 write_unlock(&dev_base_lock); 10848 } 10849 flush_all_backlogs(); 10850 10851 synchronize_net(); 10852 10853 list_for_each_entry(dev, head, unreg_list) { 10854 struct sk_buff *skb = NULL; 10855 10856 /* Shutdown queueing discipline. */ 10857 dev_shutdown(dev); 10858 10859 dev_xdp_uninstall(dev); 10860 bpf_dev_bound_netdev_unregister(dev); 10861 10862 netdev_offload_xstats_disable_all(dev); 10863 10864 /* Notify protocols, that we are about to destroy 10865 * this device. They should clean all the things. 10866 */ 10867 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10868 10869 if (!dev->rtnl_link_ops || 10870 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10871 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 10872 GFP_KERNEL, NULL, 0, 10873 portid, nlmsg_seq(nlh)); 10874 10875 /* 10876 * Flush the unicast and multicast chains 10877 */ 10878 dev_uc_flush(dev); 10879 dev_mc_flush(dev); 10880 10881 netdev_name_node_alt_flush(dev); 10882 netdev_name_node_free(dev->name_node); 10883 10884 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 10885 10886 if (dev->netdev_ops->ndo_uninit) 10887 dev->netdev_ops->ndo_uninit(dev); 10888 10889 if (skb) 10890 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); 10891 10892 /* Notifier chain MUST detach us all upper devices. 
*/
10893 WARN_ON(netdev_has_any_upper_dev(dev));
10894 WARN_ON(netdev_has_any_lower_dev(dev));
10895
10896 /* Remove entries from kobject tree */
10897 netdev_unregister_kobject(dev);
10898 #ifdef CONFIG_XPS
10899 /* Remove XPS queueing entries */
10900 netif_reset_xps_queues_gt(dev, 0);
10901 #endif
10902 }
10903
10904 synchronize_net();
10905
10906 list_for_each_entry(dev, head, unreg_list) {
10907 netdev_put(dev, &dev->dev_registered_tracker);
10908 net_set_todo(dev);
10909 }
10910
10911 list_del(head);
10912 }
10913
10914 /**
10915 * unregister_netdevice_many - unregister many devices
10916 * @head: list of devices
10917 *
10918 * Note: As most callers use a stack allocated list_head,
10919 * we force a list_del() to make sure the stack won't be corrupted later.
10920 */
10921 void unregister_netdevice_many(struct list_head *head)
10922 {
10923 unregister_netdevice_many_notify(head, 0, NULL);
10924 }
10925 EXPORT_SYMBOL(unregister_netdevice_many);
10926
10927 /**
10928 * unregister_netdev - remove device from the kernel
10929 * @dev: device
10930 *
10931 * This function shuts down a device interface and removes it
10932 * from the kernel tables.
10933 *
10934 * This is just a wrapper for unregister_netdevice that takes
10935 * the rtnl semaphore. In general you want to use this and not
10936 * unregister_netdevice.
10937 */
10938 void unregister_netdev(struct net_device *dev)
10939 {
10940 rtnl_lock();
10941 unregister_netdevice(dev);
10942 rtnl_unlock();
10943 }
10944 EXPORT_SYMBOL(unregister_netdev);
10945
10946 /**
10947 * __dev_change_net_namespace - move device to a different network namespace
10948 * @dev: device
10949 * @net: network namespace
10950 * @pat: If not NULL, name pattern to try if the current device name
10951 * is already taken in the destination network namespace.
10952 * @new_ifindex: If not zero, specifies device index in the target
10953 * namespace.
10954 *
10955 * This function shuts down a device interface and moves it
10956 * to a new network namespace. On success 0 is returned, on
10957 * a failure a negative errno code is returned.
10958 *
10959 * Callers must hold the rtnl semaphore.
10960 */
10961
10962 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10963 const char *pat, int new_ifindex)
10964 {
10965 struct net *net_old = dev_net(dev);
10966 int err, new_nsid;
10967
10968 ASSERT_RTNL();
10969
10970 /* Don't allow namespace local devices to be moved. */
10971 err = -EINVAL;
10972 if (dev->features & NETIF_F_NETNS_LOCAL)
10973 goto out;
10974
10975 /* Ensure the device has been registered */
10976 if (dev->reg_state != NETREG_REGISTERED)
10977 goto out;
10978
10979 /* Get out if there is nothing to do */
10980 err = 0;
10981 if (net_eq(net_old, net))
10982 goto out;
10983
10984 /* Pick the destination device name, and ensure
10985 * we can use it in the destination network namespace.
10986 */
10987 err = -EEXIST;
10988 if (netdev_name_in_use(net, dev->name)) {
10989 /* We get here if we can't use the current device name */
10990 if (!pat)
10991 goto out;
10992 err = dev_get_valid_name(net, dev, pat);
10993 if (err < 0)
10994 goto out;
10995 }
10996
10997 /* Check that new_ifindex isn't used yet. */
10998 err = -EBUSY;
10999 if (new_ifindex && __dev_get_by_index(net, new_ifindex))
11000 goto out;
11001
11002 /*
11003 * And now a mini version of register_netdevice/unregister_netdevice.
11004 */
11005
11006 /* If device is running, close it first.
*/ 11007 dev_close(dev); 11008 11009 /* And unlink it from device chain */ 11010 unlist_netdevice(dev, true); 11011 11012 synchronize_net(); 11013 11014 /* Shutdown queueing discipline. */ 11015 dev_shutdown(dev); 11016 11017 /* Notify protocols, that we are about to destroy 11018 * this device. They should clean all the things. 11019 * 11020 * Note that dev->reg_state stays at NETREG_REGISTERED. 11021 * This is wanted because this way 8021q and macvlan know 11022 * the device is just moving and can keep their slaves up. 11023 */ 11024 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11025 rcu_barrier(); 11026 11027 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11028 /* If there is an ifindex conflict assign a new one */ 11029 if (!new_ifindex) { 11030 if (__dev_get_by_index(net, dev->ifindex)) 11031 new_ifindex = dev_new_index(net); 11032 else 11033 new_ifindex = dev->ifindex; 11034 } 11035 11036 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11037 new_ifindex); 11038 11039 /* 11040 * Flush the unicast and multicast chains 11041 */ 11042 dev_uc_flush(dev); 11043 dev_mc_flush(dev); 11044 11045 /* Send a netdev-removed uevent to the old namespace */ 11046 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11047 netdev_adjacent_del_links(dev); 11048 11049 /* Move per-net netdevice notifiers that are following the netdevice */ 11050 move_netdevice_notifiers_dev_net(dev, net); 11051 11052 /* Actually switch the network namespace */ 11053 dev_net_set(dev, net); 11054 dev->ifindex = new_ifindex; 11055 11056 /* Send a netdev-add uevent to the new namespace */ 11057 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11058 netdev_adjacent_add_links(dev); 11059 11060 /* Fixup kobjects */ 11061 err = device_rename(&dev->dev, dev->name); 11062 WARN_ON(err); 11063 11064 /* Adapt owner in case owning user namespace of target network 11065 * namespace is different from the original one. 11066 */ 11067 err = netdev_change_owner(dev, net_old, net); 11068 WARN_ON(err); 11069 11070 /* Add the device back in the hashes */ 11071 list_netdevice(dev); 11072 11073 /* Notify protocols, that a new device appeared. */ 11074 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11075 11076 /* 11077 * Prevent userspace races by waiting until the network 11078 * device is fully setup before sending notifications. 11079 */ 11080 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 11081 11082 synchronize_net(); 11083 err = 0; 11084 out: 11085 return err; 11086 } 11087 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 11088 11089 static int dev_cpu_dead(unsigned int oldcpu) 11090 { 11091 struct sk_buff **list_skb; 11092 struct sk_buff *skb; 11093 unsigned int cpu; 11094 struct softnet_data *sd, *oldsd, *remsd = NULL; 11095 11096 local_irq_disable(); 11097 cpu = smp_processor_id(); 11098 sd = &per_cpu(softnet_data, cpu); 11099 oldsd = &per_cpu(softnet_data, oldcpu); 11100 11101 /* Find end of our completion_queue. */ 11102 list_skb = &sd->completion_queue; 11103 while (*list_skb) 11104 list_skb = &(*list_skb)->next; 11105 /* Append completion queue from offline CPU. */ 11106 *list_skb = oldsd->completion_queue; 11107 oldsd->completion_queue = NULL; 11108 11109 /* Append output queue from offline CPU. 
*/ 11110 if (oldsd->output_queue) { 11111 *sd->output_queue_tailp = oldsd->output_queue; 11112 sd->output_queue_tailp = oldsd->output_queue_tailp; 11113 oldsd->output_queue = NULL; 11114 oldsd->output_queue_tailp = &oldsd->output_queue; 11115 } 11116 /* Append NAPI poll list from offline CPU, with one exception : 11117 * process_backlog() must be called by cpu owning percpu backlog. 11118 * We properly handle process_queue & input_pkt_queue later. 11119 */ 11120 while (!list_empty(&oldsd->poll_list)) { 11121 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11122 struct napi_struct, 11123 poll_list); 11124 11125 list_del_init(&napi->poll_list); 11126 if (napi->poll == process_backlog) 11127 napi->state = 0; 11128 else 11129 ____napi_schedule(sd, napi); 11130 } 11131 11132 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11133 local_irq_enable(); 11134 11135 #ifdef CONFIG_RPS 11136 remsd = oldsd->rps_ipi_list; 11137 oldsd->rps_ipi_list = NULL; 11138 #endif 11139 /* send out pending IPI's on offline CPU */ 11140 net_rps_send_ipi(remsd); 11141 11142 /* Process offline CPU's input_pkt_queue */ 11143 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11144 netif_rx(skb); 11145 input_queue_head_incr(oldsd); 11146 } 11147 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11148 netif_rx(skb); 11149 input_queue_head_incr(oldsd); 11150 } 11151 11152 return 0; 11153 } 11154 11155 /** 11156 * netdev_increment_features - increment feature set by one 11157 * @all: current feature set 11158 * @one: new feature set 11159 * @mask: mask feature set 11160 * 11161 * Computes a new feature set after adding a device with feature set 11162 * @one to the master device with current feature set @all. Will not 11163 * enable anything that is off in @mask. Returns the new feature set. 11164 */ 11165 netdev_features_t netdev_increment_features(netdev_features_t all, 11166 netdev_features_t one, netdev_features_t mask) 11167 { 11168 if (mask & NETIF_F_HW_CSUM) 11169 mask |= NETIF_F_CSUM_MASK; 11170 mask |= NETIF_F_VLAN_CHALLENGED; 11171 11172 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11173 all &= one | ~NETIF_F_ALL_FOR_ALL; 11174 11175 /* If one device supports hw checksumming, set for all. 
*/ 11176 if (all & NETIF_F_HW_CSUM) 11177 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11178 11179 return all; 11180 } 11181 EXPORT_SYMBOL(netdev_increment_features); 11182 11183 static struct hlist_head * __net_init netdev_create_hash(void) 11184 { 11185 int i; 11186 struct hlist_head *hash; 11187 11188 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11189 if (hash != NULL) 11190 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11191 INIT_HLIST_HEAD(&hash[i]); 11192 11193 return hash; 11194 } 11195 11196 /* Initialize per network namespace state */ 11197 static int __net_init netdev_init(struct net *net) 11198 { 11199 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11200 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11201 11202 INIT_LIST_HEAD(&net->dev_base_head); 11203 11204 net->dev_name_head = netdev_create_hash(); 11205 if (net->dev_name_head == NULL) 11206 goto err_name; 11207 11208 net->dev_index_head = netdev_create_hash(); 11209 if (net->dev_index_head == NULL) 11210 goto err_idx; 11211 11212 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11213 11214 return 0; 11215 11216 err_idx: 11217 kfree(net->dev_name_head); 11218 err_name: 11219 return -ENOMEM; 11220 } 11221 11222 /** 11223 * netdev_drivername - network driver for the device 11224 * @dev: network device 11225 * 11226 * Determine network driver for device. 11227 */ 11228 const char *netdev_drivername(const struct net_device *dev) 11229 { 11230 const struct device_driver *driver; 11231 const struct device *parent; 11232 const char *empty = ""; 11233 11234 parent = dev->dev.parent; 11235 if (!parent) 11236 return empty; 11237 11238 driver = parent->driver; 11239 if (driver && driver->name) 11240 return driver->name; 11241 return empty; 11242 } 11243 11244 static void __netdev_printk(const char *level, const struct net_device *dev, 11245 struct va_format *vaf) 11246 { 11247 if (dev && dev->dev.parent) { 11248 dev_printk_emit(level[1] - '0', 11249 dev->dev.parent, 11250 "%s %s %s%s: %pV", 11251 dev_driver_string(dev->dev.parent), 11252 dev_name(dev->dev.parent), 11253 netdev_name(dev), netdev_reg_state(dev), 11254 vaf); 11255 } else if (dev) { 11256 printk("%s%s%s: %pV", 11257 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11258 } else { 11259 printk("%s(NULL net_device): %pV", level, vaf); 11260 } 11261 } 11262 11263 void netdev_printk(const char *level, const struct net_device *dev, 11264 const char *format, ...) 11265 { 11266 struct va_format vaf; 11267 va_list args; 11268 11269 va_start(args, format); 11270 11271 vaf.fmt = format; 11272 vaf.va = &args; 11273 11274 __netdev_printk(level, dev, &vaf); 11275 11276 va_end(args); 11277 } 11278 EXPORT_SYMBOL(netdev_printk); 11279 11280 #define define_netdev_printk_level(func, level) \ 11281 void func(const struct net_device *dev, const char *fmt, ...) 
\ 11282 { \ 11283 struct va_format vaf; \ 11284 va_list args; \ 11285 \ 11286 va_start(args, fmt); \ 11287 \ 11288 vaf.fmt = fmt; \ 11289 vaf.va = &args; \ 11290 \ 11291 __netdev_printk(level, dev, &vaf); \ 11292 \ 11293 va_end(args); \ 11294 } \ 11295 EXPORT_SYMBOL(func); 11296 11297 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11298 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11299 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11300 define_netdev_printk_level(netdev_err, KERN_ERR); 11301 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11302 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11303 define_netdev_printk_level(netdev_info, KERN_INFO); 11304 11305 static void __net_exit netdev_exit(struct net *net) 11306 { 11307 kfree(net->dev_name_head); 11308 kfree(net->dev_index_head); 11309 if (net != &init_net) 11310 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 11311 } 11312 11313 static struct pernet_operations __net_initdata netdev_net_ops = { 11314 .init = netdev_init, 11315 .exit = netdev_exit, 11316 }; 11317 11318 static void __net_exit default_device_exit_net(struct net *net) 11319 { 11320 struct net_device *dev, *aux; 11321 /* 11322 * Push all migratable network devices back to the 11323 * initial network namespace 11324 */ 11325 ASSERT_RTNL(); 11326 for_each_netdev_safe(net, dev, aux) { 11327 int err; 11328 char fb_name[IFNAMSIZ]; 11329 11330 /* Ignore unmoveable devices (i.e. loopback) */ 11331 if (dev->features & NETIF_F_NETNS_LOCAL) 11332 continue; 11333 11334 /* Leave virtual devices for the generic cleanup */ 11335 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 11336 continue; 11337 11338 /* Push remaining network devices to init_net */ 11339 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11340 if (netdev_name_in_use(&init_net, fb_name)) 11341 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11342 err = dev_change_net_namespace(dev, &init_net, fb_name); 11343 if (err) { 11344 pr_emerg("%s: failed to move %s to init_net: %d\n", 11345 __func__, dev->name, err); 11346 BUG(); 11347 } 11348 } 11349 } 11350 11351 static void __net_exit default_device_exit_batch(struct list_head *net_list) 11352 { 11353 /* At exit all network devices most be removed from a network 11354 * namespace. Do this in the reverse order of registration. 11355 * Do this across as many network namespaces as possible to 11356 * improve batching efficiency. 11357 */ 11358 struct net_device *dev; 11359 struct net *net; 11360 LIST_HEAD(dev_kill_list); 11361 11362 rtnl_lock(); 11363 list_for_each_entry(net, net_list, exit_list) { 11364 default_device_exit_net(net); 11365 cond_resched(); 11366 } 11367 11368 list_for_each_entry(net, net_list, exit_list) { 11369 for_each_netdev_reverse(net, dev) { 11370 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 11371 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 11372 else 11373 unregister_netdevice_queue(dev, &dev_kill_list); 11374 } 11375 } 11376 unregister_netdevice_many(&dev_kill_list); 11377 rtnl_unlock(); 11378 } 11379 11380 static struct pernet_operations __net_initdata default_device_ops = { 11381 .exit_batch = default_device_exit_batch, 11382 }; 11383 11384 /* 11385 * Initialize the DEV module. At boot time this walks the device list and 11386 * unhooks any devices that fail to initialise (normally hardware not 11387 * present) and leaves us with a valid list of present and active devices. 
11388 *
11389 */
11390
11391 /*
11392 * This is called single threaded during boot, so no need
11393 * to take the rtnl semaphore.
11394 */
11395 static int __init net_dev_init(void)
11396 {
11397 int i, rc = -ENOMEM;
11398
11399 BUG_ON(!dev_boot_phase);
11400
11401 if (dev_proc_init())
11402 goto out;
11403
11404 if (netdev_kobject_init())
11405 goto out;
11406
11407 INIT_LIST_HEAD(&ptype_all);
11408 for (i = 0; i < PTYPE_HASH_SIZE; i++)
11409 INIT_LIST_HEAD(&ptype_base[i]);
11410
11411 if (register_pernet_subsys(&netdev_net_ops))
11412 goto out;
11413
11414 /*
11415 * Initialise the packet receive queues.
11416 */
11417
11418 for_each_possible_cpu(i) {
11419 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11420 struct softnet_data *sd = &per_cpu(softnet_data, i);
11421
11422 INIT_WORK(flush, flush_backlog);
11423
11424 skb_queue_head_init(&sd->input_pkt_queue);
11425 skb_queue_head_init(&sd->process_queue);
11426 #ifdef CONFIG_XFRM_OFFLOAD
11427 skb_queue_head_init(&sd->xfrm_backlog);
11428 #endif
11429 INIT_LIST_HEAD(&sd->poll_list);
11430 sd->output_queue_tailp = &sd->output_queue;
11431 #ifdef CONFIG_RPS
11432 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11433 sd->cpu = i;
11434 #endif
11435 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11436 spin_lock_init(&sd->defer_lock);
11437
11438 init_gro_hash(&sd->backlog);
11439 sd->backlog.poll = process_backlog;
11440 sd->backlog.weight = weight_p;
11441 }
11442
11443 dev_boot_phase = 0;
11444
11445 /* The loopback device is special: if any other network device
11446 * is present in a network namespace, the loopback device must
11447 * be present too. Since we now dynamically allocate and free the
11448 * loopback device, ensure this invariant is maintained by
11449 * keeping the loopback device as the first device on the
11450 * list of network devices. This ensures the loopback device
11451 * is the first device that appears and the last network device
11452 * that disappears.
11453 */
11454 if (register_pernet_device(&loopback_net_ops))
11455 goto out;
11456
11457 if (register_pernet_device(&default_device_ops))
11458 goto out;
11459
11460 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11461 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11462
11463 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11464 NULL, dev_cpu_dead);
11465 WARN_ON(rc < 0);
11466 rc = 0;
11467 out:
11468 return rc;
11469 }
11470
11471 subsys_initcall(net_dev_init);
11472
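/*
 * Illustrative sketch (out-of-tree, not compiled): the usual driver-side
 * lifecycle around the registration helpers above. A minimal example
 * assuming an Ethernet-style device; all foo_* names are hypothetical.
 */
#if 0
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct foo_priv {
	int dummy;			/* driver state lives in the tail allocation */
};

static int foo_probe_one(struct device *parent)
{
	struct net_device *netdev;
	int err;

	/* alloc_etherdev() ends up in alloc_netdev_mqs() with ether_setup() */
	netdev = alloc_etherdev(sizeof(struct foo_priv));
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, parent);
	/* netdev->netdev_ops, MAC address, features etc. are set up here */

	err = register_netdev(netdev);	/* takes the rtnl lock itself */
	if (err) {
		free_netdev(netdev);	/* reg_state still NETREG_UNINITIALIZED */
		return err;
	}
	return 0;
}

static void foo_remove_one(struct net_device *netdev)
{
	unregister_netdev(netdev);	/* waits for references in netdev_run_todo() */
	free_netdev(netdev);
}
#endif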
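/*
 * Illustrative sketch (out-of-tree, not compiled): using init_dummy_netdev()
 * to funnel several hardware ports into one NAPI context, as its kernel-doc
 * above describes. foo_* names are hypothetical; netif_napi_add() is shown
 * in its current three-argument form.
 */
#if 0
#include <linux/netdevice.h>

static struct net_device foo_napi_dev;	/* never registered, NAPI only */
static struct napi_struct foo_napi;

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... drain completions from all ports, up to @budget packets ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void foo_napi_setup(void)
{
	init_dummy_netdev(&foo_napi_dev);
	netif_napi_add(&foo_napi_dev, &foo_napi, foo_poll);
	napi_enable(&foo_napi);
}
#endif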
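/*
 * Illustrative sketch (out-of-tree, not compiled): wiring dev_get_tstats64()
 * up as the ndo_get_stats64 callback, with dev->tstats allocated per-cpu in
 * ndo_init as dev_fetch_sw_netstats() expects. foo_* names are hypothetical.
 */
#if 0
#include <linux/netdevice.h>

static int foo_dev_init(struct net_device *dev)		/* .ndo_init */
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

static void foo_dev_uninit(struct net_device *dev)	/* .ndo_uninit */
{
	free_percpu(dev->tstats);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_init	 = foo_dev_init,
	.ndo_uninit	 = foo_dev_uninit,
	/* folds dev->stats and the per-cpu dev->tstats into the result */
	.ndo_get_stats64 = dev_get_tstats64,
};
#endif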
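/*
 * Illustrative sketch (out-of-tree, not compiled): the batched unregister
 * sequence documented before netdev_run_todo() above, for devices whose
 * free_netdev() the caller performs itself. foo_* names are hypothetical.
 */
#if 0
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void foo_destroy_all(struct net_device *devs[], unsigned int n)
{
	LIST_HEAD(kill_list);
	unsigned int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	/* see the note on unregister_netdevice_many(): the list head is
	 * detached again before this returns
	 */
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();		/* netdev_run_todo() runs here and waits for refs */

	for (i = 0; i < n; i++)
		free_netdev(devs[i]);
}
#endif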
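/*
 * Illustrative sketch (out-of-tree, not compiled): a stacked, VLAN-style
 * driver propagating its lower device's state with
 * netif_stacked_transfer_operstate() from a netdevice notifier.
 * foo_upper_dev_of() is a hypothetical lookup helper.
 */
#if 0
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper = foo_upper_dev_of(lower);

	if (!upper)
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGE)
		/* mirror carrier, dormant and testing state onto @upper */
		netif_stacked_transfer_operstate(lower, upper);

	return NOTIFY_DONE;
}
#endif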
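/*
 * Illustrative sketch (out-of-tree, not compiled): a master device (bond or
 * bridge style) recomputing its feature set from its lower devices with
 * netdev_increment_features(). FOO_BASE_FEATURES and FOO_FEATURE_MASK are
 * hypothetical placeholders for the driver's starting set and allowed mask.
 */
#if 0
#include <linux/netdevice.h>

static netdev_features_t foo_compute_features(struct net_device *master)
{
	netdev_features_t features = FOO_BASE_FEATURES;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features,
						     FOO_FEATURE_MASK);
	return features;
}
#endif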