// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "dev.h"
#include "net-sysfs.h"


static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock_irqsave(struct softnet_data *sd,
				    unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
					  unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev, bool lock)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	if (lock)
		write_lock(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	if (lock)
		write_unlock(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
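/* Usage sketch (illustrative only, not part of this file): a module-local
 * tap registered for all protocols. The handler name and the init/exit
 * wiring are hypothetical; only dev_add_pack()/dev_remove_pack() and the
 * packet_type layout shown are the interfaces documented above.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb here; it may be shared, so clone before writing
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// all protocols, or e.g. htons(ETH_P_IP)
 *		.func = my_tap_rcv,
 *	};
 *
 *	// module init:	dev_add_pack(&my_tap);
 *	// module exit:	dev_remove_pack(&my_tap);	// sleeps until no CPU sees it
 */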

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
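/* Usage sketch (illustrative only): looking a device up by name from
 * process context and dropping the reference when done. "eth0" and the
 * surrounding error handling are hypothetical; dev_get_by_name(),
 * dev_put() and init_net are the real interfaces used.
 *
 *	struct net_device *dev;
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (!dev)
 *		return -ENODEV;
 *	// ... use dev; the held reference keeps it from being freed ...
 *	dev_put(dev);
 *
 * Under rcu_read_lock(), dev_get_by_name_rcu() can be used instead and no
 * reference is taken.
 */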

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot.
 *	The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;
			list_for_each_entry(name_node, &d->name_node->list, list) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/* avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					__set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!netdev_name_in_use(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strscpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
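/* Usage sketch (illustrative only): a driver asking for the first free
 * name matching a pattern before registering its device. The "wg%d"
 * pattern and the error label are hypothetical.
 *
 *	err = dev_alloc_name(dev, "wg%d");	// e.g. picks "wg0", "wg1", ...
 *	if (err < 0)
 *		goto out_free;
 */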

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (netdev_name_in_use(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strscpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock(&dev_base_lock);

	synchronize_rcu();

	write_lock(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device.  Caller must make sure dev cannot go
 *	away,  e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	__netdev_notify_peers - notify network peers about existence of @dev,
 *	to be called when rtnl lock is already held.
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
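/* Usage sketch (illustrative only): bringing a device up from a context
 * that does not already hold RTNL. dev_open() must run under the rtnl
 * semaphore (see ASSERT_RTNL() in __dev_open()); the extack argument may
 * be NULL when there is no netlink request to report errors to.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);
 *	rtnl_unlock();
 */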

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	N(XDP_FEAT_CHANGE)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
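/* Usage sketch (illustrative only): a minimal notifier that logs devices
 * coming up. The callback and block names are hypothetical; the event
 * values, netdev_notifier_info_to_dev() and the register/unregister calls
 * are the interfaces documented above.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	// module init:	register_netdevice_notifier(&my_netdev_nb);
 *	// module exit:	unregister_netdevice_notifier(&my_netdev_nb);
 */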

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);

static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
{
	__unregister_netdevice_notifier_net(src_net, nb);
	__register_netdevice_notifier_net(dst_net, nb, true);
}

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list)
		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
1933 */ 1934 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1935 if (ret & NOTIFY_STOP_MASK) 1936 return ret; 1937 return raw_notifier_call_chain(&netdev_chain, val, info); 1938 } 1939 1940 /** 1941 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks 1942 * and roll back on error 1943 * @val_up: value passed unmodified to notifier function 1944 * @val_down: value passed unmodified to the notifier function when 1945 * recovering from an error on @val_up 1946 * @info: notifier information data 1947 * 1948 * Call all per-netns network notifier blocks, but not notifier blocks on 1949 * the global notifier chain. Parameters and return value are as for 1950 * raw_notifier_call_chain_robust(). 1951 */ 1952 1953 static int 1954 call_netdevice_notifiers_info_robust(unsigned long val_up, 1955 unsigned long val_down, 1956 struct netdev_notifier_info *info) 1957 { 1958 struct net *net = dev_net(info->dev); 1959 1960 ASSERT_RTNL(); 1961 1962 return raw_notifier_call_chain_robust(&net->netdev_chain, 1963 val_up, val_down, info); 1964 } 1965 1966 static int call_netdevice_notifiers_extack(unsigned long val, 1967 struct net_device *dev, 1968 struct netlink_ext_ack *extack) 1969 { 1970 struct netdev_notifier_info info = { 1971 .dev = dev, 1972 .extack = extack, 1973 }; 1974 1975 return call_netdevice_notifiers_info(val, &info); 1976 } 1977 1978 /** 1979 * call_netdevice_notifiers - call all network notifier blocks 1980 * @val: value passed unmodified to notifier function 1981 * @dev: net_device pointer passed unmodified to notifier function 1982 * 1983 * Call all network notifier blocks. Parameters and return value 1984 * are as for raw_notifier_call_chain(). 1985 */ 1986 1987 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1988 { 1989 return call_netdevice_notifiers_extack(val, dev, NULL); 1990 } 1991 EXPORT_SYMBOL(call_netdevice_notifiers); 1992 1993 /** 1994 * call_netdevice_notifiers_mtu - call all network notifier blocks 1995 * @val: value passed unmodified to notifier function 1996 * @dev: net_device pointer passed unmodified to notifier function 1997 * @arg: additional u32 argument passed to the notifier function 1998 * 1999 * Call all network notifier blocks. Parameters and return value 2000 * are as for raw_notifier_call_chain().
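 *
 * Receiver-side sketch (hypothetical notifier, shown only for illustration):
 * since struct netdev_notifier_info_ext embeds the plain info at offset
 * zero, an interested notifier can read the extended data for MTU events:
 *
 *	static int my_mtu_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct netdev_notifier_info_ext *ext = ptr;
 *
 *		if (event == NETDEV_CHANGEMTU)
 *			pr_info("new mtu %u\n", ext->ext.mtu);
 *		return NOTIFY_DONE;
 *	}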
2001 */ 2002 static int call_netdevice_notifiers_mtu(unsigned long val, 2003 struct net_device *dev, u32 arg) 2004 { 2005 struct netdev_notifier_info_ext info = { 2006 .info.dev = dev, 2007 .ext.mtu = arg, 2008 }; 2009 2010 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2011 2012 return call_netdevice_notifiers_info(val, &info.info); 2013 } 2014 2015 #ifdef CONFIG_NET_INGRESS 2016 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2017 2018 void net_inc_ingress_queue(void) 2019 { 2020 static_branch_inc(&ingress_needed_key); 2021 } 2022 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2023 2024 void net_dec_ingress_queue(void) 2025 { 2026 static_branch_dec(&ingress_needed_key); 2027 } 2028 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2029 #endif 2030 2031 #ifdef CONFIG_NET_EGRESS 2032 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2033 2034 void net_inc_egress_queue(void) 2035 { 2036 static_branch_inc(&egress_needed_key); 2037 } 2038 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2039 2040 void net_dec_egress_queue(void) 2041 { 2042 static_branch_dec(&egress_needed_key); 2043 } 2044 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2045 #endif 2046 2047 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2048 EXPORT_SYMBOL(netstamp_needed_key); 2049 #ifdef CONFIG_JUMP_LABEL 2050 static atomic_t netstamp_needed_deferred; 2051 static atomic_t netstamp_wanted; 2052 static void netstamp_clear(struct work_struct *work) 2053 { 2054 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2055 int wanted; 2056 2057 wanted = atomic_add_return(deferred, &netstamp_wanted); 2058 if (wanted > 0) 2059 static_branch_enable(&netstamp_needed_key); 2060 else 2061 static_branch_disable(&netstamp_needed_key); 2062 } 2063 static DECLARE_WORK(netstamp_work, netstamp_clear); 2064 #endif 2065 2066 void net_enable_timestamp(void) 2067 { 2068 #ifdef CONFIG_JUMP_LABEL 2069 int wanted = atomic_read(&netstamp_wanted); 2070 2071 while (wanted > 0) { 2072 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1)) 2073 return; 2074 } 2075 atomic_inc(&netstamp_needed_deferred); 2076 schedule_work(&netstamp_work); 2077 #else 2078 static_branch_inc(&netstamp_needed_key); 2079 #endif 2080 } 2081 EXPORT_SYMBOL(net_enable_timestamp); 2082 2083 void net_disable_timestamp(void) 2084 { 2085 #ifdef CONFIG_JUMP_LABEL 2086 int wanted = atomic_read(&netstamp_wanted); 2087 2088 while (wanted > 1) { 2089 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1)) 2090 return; 2091 } 2092 atomic_dec(&netstamp_needed_deferred); 2093 schedule_work(&netstamp_work); 2094 #else 2095 static_branch_dec(&netstamp_needed_key); 2096 #endif 2097 } 2098 EXPORT_SYMBOL(net_disable_timestamp); 2099 2100 static inline void net_timestamp_set(struct sk_buff *skb) 2101 { 2102 skb->tstamp = 0; 2103 skb->mono_delivery_time = 0; 2104 if (static_branch_unlikely(&netstamp_needed_key)) 2105 skb->tstamp = ktime_get_real(); 2106 } 2107 2108 #define net_timestamp_check(COND, SKB) \ 2109 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2110 if ((COND) && !(SKB)->tstamp) \ 2111 (SKB)->tstamp = ktime_get_real(); \ 2112 } \ 2113 2114 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2115 { 2116 return __is_skb_forwardable(dev, skb, true); 2117 } 2118 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2119 2120 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2121 bool check_mtu) 2122 { 2123 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2124 2125 if (likely(!ret)) { 2126 skb->protocol = eth_type_trans(skb, 
dev); 2127 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2128 } 2129 2130 return ret; 2131 } 2132 2133 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2134 { 2135 return __dev_forward_skb2(dev, skb, true); 2136 } 2137 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2138 2139 /** 2140 * dev_forward_skb - loopback an skb to another netif 2141 * 2142 * @dev: destination network device 2143 * @skb: buffer to forward 2144 * 2145 * return values: 2146 * NET_RX_SUCCESS (no congestion) 2147 * NET_RX_DROP (packet was dropped, but freed) 2148 * 2149 * dev_forward_skb can be used for injecting an skb from the 2150 * start_xmit function of one device into the receive queue 2151 * of another device. 2152 * 2153 * The receiving device may be in another namespace, so 2154 * we have to clear all information in the skb that could 2155 * impact namespace isolation. 2156 */ 2157 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2158 { 2159 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2160 } 2161 EXPORT_SYMBOL_GPL(dev_forward_skb); 2162 2163 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2164 { 2165 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2166 } 2167 2168 static inline int deliver_skb(struct sk_buff *skb, 2169 struct packet_type *pt_prev, 2170 struct net_device *orig_dev) 2171 { 2172 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2173 return -ENOMEM; 2174 refcount_inc(&skb->users); 2175 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2176 } 2177 2178 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2179 struct packet_type **pt, 2180 struct net_device *orig_dev, 2181 __be16 type, 2182 struct list_head *ptype_list) 2183 { 2184 struct packet_type *ptype, *pt_prev = *pt; 2185 2186 list_for_each_entry_rcu(ptype, ptype_list, list) { 2187 if (ptype->type != type) 2188 continue; 2189 if (pt_prev) 2190 deliver_skb(skb, pt_prev, orig_dev); 2191 pt_prev = ptype; 2192 } 2193 *pt = pt_prev; 2194 } 2195 2196 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2197 { 2198 if (!ptype->af_packet_priv || !skb->sk) 2199 return false; 2200 2201 if (ptype->id_match) 2202 return ptype->id_match(ptype, skb->sk); 2203 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2204 return true; 2205 2206 return false; 2207 } 2208 2209 /** 2210 * dev_nit_active - return true if any network interface taps are in use 2211 * 2212 * @dev: network device to check for the presence of taps 2213 */ 2214 bool dev_nit_active(struct net_device *dev) 2215 { 2216 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2217 } 2218 EXPORT_SYMBOL_GPL(dev_nit_active); 2219 2220 /* 2221 * Support routine. Sends outgoing frames to any network 2222 * taps currently in use. 
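 *
 * A tap is simply a struct packet_type registered for ETH_P_ALL; the
 * sketch below is illustrative only (names invented for this comment).
 * The tap owns the clone it is handed and must consume or free it:
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		consume_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);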
2223 */ 2224 2225 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2226 { 2227 struct packet_type *ptype; 2228 struct sk_buff *skb2 = NULL; 2229 struct packet_type *pt_prev = NULL; 2230 struct list_head *ptype_list = &ptype_all; 2231 2232 rcu_read_lock(); 2233 again: 2234 list_for_each_entry_rcu(ptype, ptype_list, list) { 2235 if (ptype->ignore_outgoing) 2236 continue; 2237 2238 /* Never send packets back to the socket 2239 * they originated from - MvS (miquels@drinkel.ow.org) 2240 */ 2241 if (skb_loop_sk(ptype, skb)) 2242 continue; 2243 2244 if (pt_prev) { 2245 deliver_skb(skb2, pt_prev, skb->dev); 2246 pt_prev = ptype; 2247 continue; 2248 } 2249 2250 /* need to clone skb, done only once */ 2251 skb2 = skb_clone(skb, GFP_ATOMIC); 2252 if (!skb2) 2253 goto out_unlock; 2254 2255 net_timestamp_set(skb2); 2256 2257 /* skb->nh should be correctly 2258 * set by sender, so that the second statement is 2259 * just protection against buggy protocols. 2260 */ 2261 skb_reset_mac_header(skb2); 2262 2263 if (skb_network_header(skb2) < skb2->data || 2264 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2265 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2266 ntohs(skb2->protocol), 2267 dev->name); 2268 skb_reset_network_header(skb2); 2269 } 2270 2271 skb2->transport_header = skb2->network_header; 2272 skb2->pkt_type = PACKET_OUTGOING; 2273 pt_prev = ptype; 2274 } 2275 2276 if (ptype_list == &ptype_all) { 2277 ptype_list = &dev->ptype_all; 2278 goto again; 2279 } 2280 out_unlock: 2281 if (pt_prev) { 2282 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2283 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2284 else 2285 kfree_skb(skb2); 2286 } 2287 rcu_read_unlock(); 2288 } 2289 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2290 2291 /** 2292 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2293 * @dev: Network device 2294 * @txq: number of queues available 2295 * 2296 * If real_num_tx_queues is changed the tc mappings may no longer be 2297 * valid. To resolve this verify the tc mapping remains valid and if 2298 * not NULL the mapping. With no priorities mapping to this 2299 * offset/count pair it will no longer be used. In the worst case TC0 2300 * is invalid nothing can be done so disable priority mappings. If is 2301 * expected that drivers will fix this mapping if they can before 2302 * calling netif_set_real_num_tx_queues. 2303 */ 2304 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2305 { 2306 int i; 2307 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2308 2309 /* If TC0 is invalidated disable TC mapping */ 2310 if (tc->offset + tc->count > txq) { 2311 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2312 dev->num_tc = 0; 2313 return; 2314 } 2315 2316 /* Invalidated prio to tc mappings set to TC0 */ 2317 for (i = 1; i < TC_BITMASK + 1; i++) { 2318 int q = netdev_get_prio_tc_map(dev, i); 2319 2320 tc = &dev->tc_to_txq[q]; 2321 if (tc->offset + tc->count > txq) { 2322 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2323 i, q); 2324 netdev_set_prio_tc_map(dev, i, 0); 2325 } 2326 } 2327 } 2328 2329 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2330 { 2331 if (dev->num_tc) { 2332 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2333 int i; 2334 2335 /* walk through the TCs and see if it falls into any of them */ 2336 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2337 if ((txq - tc->offset) < tc->count) 2338 return i; 2339 } 2340 2341 /* didn't find it, just return -1 to indicate no match */ 2342 return -1; 2343 } 2344 2345 return 0; 2346 } 2347 EXPORT_SYMBOL(netdev_txq_to_tc); 2348 2349 #ifdef CONFIG_XPS 2350 static struct static_key xps_needed __read_mostly; 2351 static struct static_key xps_rxqs_needed __read_mostly; 2352 static DEFINE_MUTEX(xps_map_mutex); 2353 #define xmap_dereference(P) \ 2354 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2355 2356 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2357 struct xps_dev_maps *old_maps, int tci, u16 index) 2358 { 2359 struct xps_map *map = NULL; 2360 int pos; 2361 2362 if (dev_maps) 2363 map = xmap_dereference(dev_maps->attr_map[tci]); 2364 if (!map) 2365 return false; 2366 2367 for (pos = map->len; pos--;) { 2368 if (map->queues[pos] != index) 2369 continue; 2370 2371 if (map->len > 1) { 2372 map->queues[pos] = map->queues[--map->len]; 2373 break; 2374 } 2375 2376 if (old_maps) 2377 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2378 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2379 kfree_rcu(map, rcu); 2380 return false; 2381 } 2382 2383 return true; 2384 } 2385 2386 static bool remove_xps_queue_cpu(struct net_device *dev, 2387 struct xps_dev_maps *dev_maps, 2388 int cpu, u16 offset, u16 count) 2389 { 2390 int num_tc = dev_maps->num_tc; 2391 bool active = false; 2392 int tci; 2393 2394 for (tci = cpu * num_tc; num_tc--; tci++) { 2395 int i, j; 2396 2397 for (i = count, j = offset; i--; j++) { 2398 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2399 break; 2400 } 2401 2402 active |= i < 0; 2403 } 2404 2405 return active; 2406 } 2407 2408 static void reset_xps_maps(struct net_device *dev, 2409 struct xps_dev_maps *dev_maps, 2410 enum xps_map_type type) 2411 { 2412 static_key_slow_dec_cpuslocked(&xps_needed); 2413 if (type == XPS_RXQS) 2414 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2415 2416 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2417 2418 kfree_rcu(dev_maps, rcu); 2419 } 2420 2421 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2422 u16 offset, u16 count) 2423 { 2424 struct xps_dev_maps *dev_maps; 2425 bool active = false; 2426 int i, j; 2427 2428 dev_maps = xmap_dereference(dev->xps_maps[type]); 2429 if (!dev_maps) 2430 return; 2431 2432 for (j = 0; j < dev_maps->nr_ids; j++) 2433 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2434 if (!active) 2435 reset_xps_maps(dev, dev_maps, type); 2436 2437 if (type == XPS_CPUS) { 2438 for (i = offset + (count - 1); count--; i--) 2439 netdev_queue_numa_node_write( 2440 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2441 } 2442 } 2443 2444 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2445 u16 count) 2446 { 2447 if (!static_key_false(&xps_needed)) 2448 return; 2449 2450 cpus_read_lock(); 2451 mutex_lock(&xps_map_mutex); 2452 2453 if (static_key_false(&xps_rxqs_needed)) 2454 clean_xps_maps(dev, XPS_RXQS, offset, count); 2455 2456 clean_xps_maps(dev, XPS_CPUS, offset, count); 2457 2458 mutex_unlock(&xps_map_mutex); 2459 cpus_read_unlock(); 2460 } 2461 2462 static void 
netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2463 { 2464 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2465 } 2466 2467 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2468 u16 index, bool is_rxqs_map) 2469 { 2470 struct xps_map *new_map; 2471 int alloc_len = XPS_MIN_MAP_ALLOC; 2472 int i, pos; 2473 2474 for (pos = 0; map && pos < map->len; pos++) { 2475 if (map->queues[pos] != index) 2476 continue; 2477 return map; 2478 } 2479 2480 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2481 if (map) { 2482 if (pos < map->alloc_len) 2483 return map; 2484 2485 alloc_len = map->alloc_len * 2; 2486 } 2487 2488 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2489 * map 2490 */ 2491 if (is_rxqs_map) 2492 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2493 else 2494 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2495 cpu_to_node(attr_index)); 2496 if (!new_map) 2497 return NULL; 2498 2499 for (i = 0; i < pos; i++) 2500 new_map->queues[i] = map->queues[i]; 2501 new_map->alloc_len = alloc_len; 2502 new_map->len = pos; 2503 2504 return new_map; 2505 } 2506 2507 /* Copy xps maps at a given index */ 2508 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2509 struct xps_dev_maps *new_dev_maps, int index, 2510 int tc, bool skip_tc) 2511 { 2512 int i, tci = index * dev_maps->num_tc; 2513 struct xps_map *map; 2514 2515 /* copy maps belonging to foreign traffic classes */ 2516 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2517 if (i == tc && skip_tc) 2518 continue; 2519 2520 /* fill in the new device map from the old device map */ 2521 map = xmap_dereference(dev_maps->attr_map[tci]); 2522 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2523 } 2524 } 2525 2526 /* Must be called under cpus_read_lock */ 2527 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2528 u16 index, enum xps_map_type type) 2529 { 2530 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2531 const unsigned long *online_mask = NULL; 2532 bool active = false, copy = false; 2533 int i, j, tci, numa_node_id = -2; 2534 int maps_sz, num_tc = 1, tc = 0; 2535 struct xps_map *map, *new_map; 2536 unsigned int nr_ids; 2537 2538 if (dev->num_tc) { 2539 /* Do not allow XPS on subordinate device directly */ 2540 num_tc = dev->num_tc; 2541 if (num_tc < 0) 2542 return -EINVAL; 2543 2544 /* If queue belongs to subordinate dev use its map */ 2545 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2546 2547 tc = netdev_txq_to_tc(dev, index); 2548 if (tc < 0) 2549 return -EINVAL; 2550 } 2551 2552 mutex_lock(&xps_map_mutex); 2553 2554 dev_maps = xmap_dereference(dev->xps_maps[type]); 2555 if (type == XPS_RXQS) { 2556 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2557 nr_ids = dev->num_rx_queues; 2558 } else { 2559 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2560 if (num_possible_cpus() > 1) 2561 online_mask = cpumask_bits(cpu_online_mask); 2562 nr_ids = nr_cpu_ids; 2563 } 2564 2565 if (maps_sz < L1_CACHE_BYTES) 2566 maps_sz = L1_CACHE_BYTES; 2567 2568 /* The old dev_maps could be larger or smaller than the one we're 2569 * setting up now, as dev->num_tc or nr_ids could have been updated in 2570 * between. We could try to be smart, but let's be safe instead and only 2571 * copy foreign traffic classes if the two map sizes match. 
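 *
 * Illustration (not normative): attr_map[] is indexed as id * num_tc + tc,
 * so with num_tc == 2 and nr_ids == 4 the entry for CPU/rx-queue 3,
 * traffic class 1 sits at index 3 * 2 + 1 == 7.  A map built with a
 * different num_tc or nr_ids indexes differently, which is why foreign
 * traffic classes are only copied when both dimensions match.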
2572 */ 2573 if (dev_maps && 2574 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2575 copy = true; 2576 2577 /* allocate memory for queue storage */ 2578 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2579 j < nr_ids;) { 2580 if (!new_dev_maps) { 2581 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2582 if (!new_dev_maps) { 2583 mutex_unlock(&xps_map_mutex); 2584 return -ENOMEM; 2585 } 2586 2587 new_dev_maps->nr_ids = nr_ids; 2588 new_dev_maps->num_tc = num_tc; 2589 } 2590 2591 tci = j * num_tc + tc; 2592 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2593 2594 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2595 if (!map) 2596 goto error; 2597 2598 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2599 } 2600 2601 if (!new_dev_maps) 2602 goto out_no_new_maps; 2603 2604 if (!dev_maps) { 2605 /* Increment static keys at most once per type */ 2606 static_key_slow_inc_cpuslocked(&xps_needed); 2607 if (type == XPS_RXQS) 2608 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2609 } 2610 2611 for (j = 0; j < nr_ids; j++) { 2612 bool skip_tc = false; 2613 2614 tci = j * num_tc + tc; 2615 if (netif_attr_test_mask(j, mask, nr_ids) && 2616 netif_attr_test_online(j, online_mask, nr_ids)) { 2617 /* add tx-queue to CPU/rx-queue maps */ 2618 int pos = 0; 2619 2620 skip_tc = true; 2621 2622 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2623 while ((pos < map->len) && (map->queues[pos] != index)) 2624 pos++; 2625 2626 if (pos == map->len) 2627 map->queues[map->len++] = index; 2628 #ifdef CONFIG_NUMA 2629 if (type == XPS_CPUS) { 2630 if (numa_node_id == -2) 2631 numa_node_id = cpu_to_node(j); 2632 else if (numa_node_id != cpu_to_node(j)) 2633 numa_node_id = -1; 2634 } 2635 #endif 2636 } 2637 2638 if (copy) 2639 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2640 skip_tc); 2641 } 2642 2643 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2644 2645 /* Cleanup old maps */ 2646 if (!dev_maps) 2647 goto out_no_old_maps; 2648 2649 for (j = 0; j < dev_maps->nr_ids; j++) { 2650 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2651 map = xmap_dereference(dev_maps->attr_map[tci]); 2652 if (!map) 2653 continue; 2654 2655 if (copy) { 2656 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2657 if (map == new_map) 2658 continue; 2659 } 2660 2661 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2662 kfree_rcu(map, rcu); 2663 } 2664 } 2665 2666 old_dev_maps = dev_maps; 2667 2668 out_no_old_maps: 2669 dev_maps = new_dev_maps; 2670 active = true; 2671 2672 out_no_new_maps: 2673 if (type == XPS_CPUS) 2674 /* update Tx queue numa node */ 2675 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2676 (numa_node_id >= 0) ? 2677 numa_node_id : NUMA_NO_NODE); 2678 2679 if (!dev_maps) 2680 goto out_no_maps; 2681 2682 /* removes tx-queue from unused CPUs/rx-queues */ 2683 for (j = 0; j < dev_maps->nr_ids; j++) { 2684 tci = j * dev_maps->num_tc; 2685 2686 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2687 if (i == tc && 2688 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2689 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2690 continue; 2691 2692 active |= remove_xps_queue(dev_maps, 2693 copy ? 
old_dev_maps : NULL, 2694 tci, index); 2695 } 2696 } 2697 2698 if (old_dev_maps) 2699 kfree_rcu(old_dev_maps, rcu); 2700 2701 /* free map if not active */ 2702 if (!active) 2703 reset_xps_maps(dev, dev_maps, type); 2704 2705 out_no_maps: 2706 mutex_unlock(&xps_map_mutex); 2707 2708 return 0; 2709 error: 2710 /* remove any maps that we added */ 2711 for (j = 0; j < nr_ids; j++) { 2712 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2713 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2714 map = copy ? 2715 xmap_dereference(dev_maps->attr_map[tci]) : 2716 NULL; 2717 if (new_map && new_map != map) 2718 kfree(new_map); 2719 } 2720 } 2721 2722 mutex_unlock(&xps_map_mutex); 2723 2724 kfree(new_dev_maps); 2725 return -ENOMEM; 2726 } 2727 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2728 2729 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2730 u16 index) 2731 { 2732 int ret; 2733 2734 cpus_read_lock(); 2735 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2736 cpus_read_unlock(); 2737 2738 return ret; 2739 } 2740 EXPORT_SYMBOL(netif_set_xps_queue); 2741 2742 #endif 2743 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2744 { 2745 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2746 2747 /* Unbind any subordinate channels */ 2748 while (txq-- != &dev->_tx[0]) { 2749 if (txq->sb_dev) 2750 netdev_unbind_sb_channel(dev, txq->sb_dev); 2751 } 2752 } 2753 2754 void netdev_reset_tc(struct net_device *dev) 2755 { 2756 #ifdef CONFIG_XPS 2757 netif_reset_xps_queues_gt(dev, 0); 2758 #endif 2759 netdev_unbind_all_sb_channels(dev); 2760 2761 /* Reset TC configuration of device */ 2762 dev->num_tc = 0; 2763 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2764 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2765 } 2766 EXPORT_SYMBOL(netdev_reset_tc); 2767 2768 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2769 { 2770 if (tc >= dev->num_tc) 2771 return -EINVAL; 2772 2773 #ifdef CONFIG_XPS 2774 netif_reset_xps_queues(dev, offset, count); 2775 #endif 2776 dev->tc_to_txq[tc].count = count; 2777 dev->tc_to_txq[tc].offset = offset; 2778 return 0; 2779 } 2780 EXPORT_SYMBOL(netdev_set_tc_queue); 2781 2782 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2783 { 2784 if (num_tc > TC_MAX_QUEUE) 2785 return -EINVAL; 2786 2787 #ifdef CONFIG_XPS 2788 netif_reset_xps_queues_gt(dev, 0); 2789 #endif 2790 netdev_unbind_all_sb_channels(dev); 2791 2792 dev->num_tc = num_tc; 2793 return 0; 2794 } 2795 EXPORT_SYMBOL(netdev_set_num_tc); 2796 2797 void netdev_unbind_sb_channel(struct net_device *dev, 2798 struct net_device *sb_dev) 2799 { 2800 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2801 2802 #ifdef CONFIG_XPS 2803 netif_reset_xps_queues_gt(sb_dev, 0); 2804 #endif 2805 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2806 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2807 2808 while (txq-- != &dev->_tx[0]) { 2809 if (txq->sb_dev == sb_dev) 2810 txq->sb_dev = NULL; 2811 } 2812 } 2813 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2814 2815 int netdev_bind_sb_channel_queue(struct net_device *dev, 2816 struct net_device *sb_dev, 2817 u8 tc, u16 count, u16 offset) 2818 { 2819 /* Make certain the sb_dev and dev are already configured */ 2820 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2821 return -EINVAL; 2822 2823 /* We cannot hand out queues we don't have */ 2824 if ((offset + count) > dev->real_num_tx_queues) 2825 return -EINVAL; 2826 2827 /* Record the mapping */ 2828 
sb_dev->tc_to_txq[tc].count = count; 2829 sb_dev->tc_to_txq[tc].offset = offset; 2830 2831 /* Provide a way for Tx queue to find the tc_to_txq map or 2832 * XPS map for itself. 2833 */ 2834 while (count--) 2835 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2836 2837 return 0; 2838 } 2839 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2840 2841 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2842 { 2843 /* Do not use a multiqueue device to represent a subordinate channel */ 2844 if (netif_is_multiqueue(dev)) 2845 return -ENODEV; 2846 2847 /* We allow channels 1 - 32767 to be used for subordinate channels. 2848 * Channel 0 is meant to be "native" mode and used only to represent 2849 * the main root device. We allow writing 0 to reset the device back 2850 * to normal mode after being used as a subordinate channel. 2851 */ 2852 if (channel > S16_MAX) 2853 return -EINVAL; 2854 2855 dev->num_tc = -channel; 2856 2857 return 0; 2858 } 2859 EXPORT_SYMBOL(netdev_set_sb_channel); 2860 2861 /* 2862 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2863 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2864 */ 2865 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2866 { 2867 bool disabling; 2868 int rc; 2869 2870 disabling = txq < dev->real_num_tx_queues; 2871 2872 if (txq < 1 || txq > dev->num_tx_queues) 2873 return -EINVAL; 2874 2875 if (dev->reg_state == NETREG_REGISTERED || 2876 dev->reg_state == NETREG_UNREGISTERING) { 2877 ASSERT_RTNL(); 2878 2879 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2880 txq); 2881 if (rc) 2882 return rc; 2883 2884 if (dev->num_tc) 2885 netif_setup_tc(dev, txq); 2886 2887 dev_qdisc_change_real_num_tx(dev, txq); 2888 2889 dev->real_num_tx_queues = txq; 2890 2891 if (disabling) { 2892 synchronize_net(); 2893 qdisc_reset_all_tx_gt(dev, txq); 2894 #ifdef CONFIG_XPS 2895 netif_reset_xps_queues_gt(dev, txq); 2896 #endif 2897 } 2898 } else { 2899 dev->real_num_tx_queues = txq; 2900 } 2901 2902 return 0; 2903 } 2904 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2905 2906 #ifdef CONFIG_SYSFS 2907 /** 2908 * netif_set_real_num_rx_queues - set actual number of RX queues used 2909 * @dev: Network device 2910 * @rxq: Actual number of RX queues 2911 * 2912 * This must be called either with the rtnl_lock held or before 2913 * registration of the net device. Returns 0 on success, or a 2914 * negative error code. If called before registration, it always 2915 * succeeds. 2916 */ 2917 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2918 { 2919 int rc; 2920 2921 if (rxq < 1 || rxq > dev->num_rx_queues) 2922 return -EINVAL; 2923 2924 if (dev->reg_state == NETREG_REGISTERED) { 2925 ASSERT_RTNL(); 2926 2927 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2928 rxq); 2929 if (rc) 2930 return rc; 2931 } 2932 2933 dev->real_num_rx_queues = rxq; 2934 return 0; 2935 } 2936 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2937 #endif 2938 2939 /** 2940 * netif_set_real_num_queues - set actual number of RX and TX queues used 2941 * @dev: Network device 2942 * @txq: Actual number of TX queues 2943 * @rxq: Actual number of RX queues 2944 * 2945 * Set the real number of both TX and RX queues. 2946 * Does nothing if the number of queues is already correct. 
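 *
 * Illustrative driver-side sketch (names invented; assumes the netdev was
 * allocated with at least this many queues and that rtnl_lock() is held
 * once the device is registered):
 *
 *	err = netif_set_real_num_queues(netdev, n_active_txq, n_active_rxq);
 *	if (err)
 *		goto err_disable;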
2947 */ 2948 int netif_set_real_num_queues(struct net_device *dev, 2949 unsigned int txq, unsigned int rxq) 2950 { 2951 unsigned int old_rxq = dev->real_num_rx_queues; 2952 int err; 2953 2954 if (txq < 1 || txq > dev->num_tx_queues || 2955 rxq < 1 || rxq > dev->num_rx_queues) 2956 return -EINVAL; 2957 2958 /* Start from increases, so the error path only does decreases - 2959 * decreases can't fail. 2960 */ 2961 if (rxq > dev->real_num_rx_queues) { 2962 err = netif_set_real_num_rx_queues(dev, rxq); 2963 if (err) 2964 return err; 2965 } 2966 if (txq > dev->real_num_tx_queues) { 2967 err = netif_set_real_num_tx_queues(dev, txq); 2968 if (err) 2969 goto undo_rx; 2970 } 2971 if (rxq < dev->real_num_rx_queues) 2972 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 2973 if (txq < dev->real_num_tx_queues) 2974 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 2975 2976 return 0; 2977 undo_rx: 2978 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 2979 return err; 2980 } 2981 EXPORT_SYMBOL(netif_set_real_num_queues); 2982 2983 /** 2984 * netif_set_tso_max_size() - set the max size of TSO frames supported 2985 * @dev: netdev to update 2986 * @size: max skb->len of a TSO frame 2987 * 2988 * Set the limit on the size of TSO super-frames the device can handle. 2989 * Unless explicitly set the stack will assume the value of 2990 * %GSO_LEGACY_MAX_SIZE. 2991 */ 2992 void netif_set_tso_max_size(struct net_device *dev, unsigned int size) 2993 { 2994 dev->tso_max_size = min(GSO_MAX_SIZE, size); 2995 if (size < READ_ONCE(dev->gso_max_size)) 2996 netif_set_gso_max_size(dev, size); 2997 if (size < READ_ONCE(dev->gso_ipv4_max_size)) 2998 netif_set_gso_ipv4_max_size(dev, size); 2999 } 3000 EXPORT_SYMBOL(netif_set_tso_max_size); 3001 3002 /** 3003 * netif_set_tso_max_segs() - set the max number of segs supported for TSO 3004 * @dev: netdev to update 3005 * @segs: max number of TCP segments 3006 * 3007 * Set the limit on the number of TCP segments the device can generate from 3008 * a single TSO super-frame. 3009 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. 3010 */ 3011 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) 3012 { 3013 dev->tso_max_segs = segs; 3014 if (segs < READ_ONCE(dev->gso_max_segs)) 3015 netif_set_gso_max_segs(dev, segs); 3016 } 3017 EXPORT_SYMBOL(netif_set_tso_max_segs); 3018 3019 /** 3020 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper 3021 * @to: netdev to update 3022 * @from: netdev from which to copy the limits 3023 */ 3024 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) 3025 { 3026 netif_set_tso_max_size(to, from->tso_max_size); 3027 netif_set_tso_max_segs(to, from->tso_max_segs); 3028 } 3029 EXPORT_SYMBOL(netif_inherit_tso_max); 3030 3031 /** 3032 * netif_get_num_default_rss_queues - default number of RSS queues 3033 * 3034 * Default value is the number of physical cores if there are only 1 or 2, or 3035 * divided by 2 if there are more. 3036 */ 3037 int netif_get_num_default_rss_queues(void) 3038 { 3039 cpumask_var_t cpus; 3040 int cpu, count = 0; 3041 3042 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) 3043 return 1; 3044 3045 cpumask_copy(cpus, cpu_online_mask); 3046 for_each_cpu(cpu, cpus) { 3047 ++count; 3048 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); 3049 } 3050 free_cpumask_var(cpus); 3051 3052 return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; 3053 } 3054 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3055 3056 static void __netif_reschedule(struct Qdisc *q) 3057 { 3058 struct softnet_data *sd; 3059 unsigned long flags; 3060 3061 local_irq_save(flags); 3062 sd = this_cpu_ptr(&softnet_data); 3063 q->next_sched = NULL; 3064 *sd->output_queue_tailp = q; 3065 sd->output_queue_tailp = &q->next_sched; 3066 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3067 local_irq_restore(flags); 3068 } 3069 3070 void __netif_schedule(struct Qdisc *q) 3071 { 3072 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3073 __netif_reschedule(q); 3074 } 3075 EXPORT_SYMBOL(__netif_schedule); 3076 3077 struct dev_kfree_skb_cb { 3078 enum skb_free_reason reason; 3079 }; 3080 3081 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3082 { 3083 return (struct dev_kfree_skb_cb *)skb->cb; 3084 } 3085 3086 void netif_schedule_queue(struct netdev_queue *txq) 3087 { 3088 rcu_read_lock(); 3089 if (!netif_xmit_stopped(txq)) { 3090 struct Qdisc *q = rcu_dereference(txq->qdisc); 3091 3092 __netif_schedule(q); 3093 } 3094 rcu_read_unlock(); 3095 } 3096 EXPORT_SYMBOL(netif_schedule_queue); 3097 3098 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3099 { 3100 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3101 struct Qdisc *q; 3102 3103 rcu_read_lock(); 3104 q = rcu_dereference(dev_queue->qdisc); 3105 __netif_schedule(q); 3106 rcu_read_unlock(); 3107 } 3108 } 3109 EXPORT_SYMBOL(netif_tx_wake_queue); 3110 3111 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 3112 { 3113 unsigned long flags; 3114 3115 if (unlikely(!skb)) 3116 return; 3117 3118 if (likely(refcount_read(&skb->users) == 1)) { 3119 smp_rmb(); 3120 refcount_set(&skb->users, 0); 3121 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3122 return; 3123 } 3124 get_kfree_skb_cb(skb)->reason = reason; 3125 local_irq_save(flags); 3126 skb->next = __this_cpu_read(softnet_data.completion_queue); 3127 __this_cpu_write(softnet_data.completion_queue, skb); 3128 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3129 local_irq_restore(flags); 3130 } 3131 EXPORT_SYMBOL(__dev_kfree_skb_irq); 3132 3133 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 3134 { 3135 if (in_hardirq() || irqs_disabled()) 3136 __dev_kfree_skb_irq(skb, reason); 3137 else if (unlikely(reason == SKB_REASON_DROPPED)) 3138 kfree_skb(skb); 3139 else 3140 consume_skb(skb); 3141 } 3142 EXPORT_SYMBOL(__dev_kfree_skb_any); 3143 3144 3145 /** 3146 * netif_device_detach - mark device as removed 3147 * @dev: network device 3148 * 3149 * Mark device as removed from system and therefore no longer available. 3150 */ 3151 void netif_device_detach(struct net_device *dev) 3152 { 3153 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3154 netif_running(dev)) { 3155 netif_tx_stop_all_queues(dev); 3156 } 3157 } 3158 EXPORT_SYMBOL(netif_device_detach); 3159 3160 /** 3161 * netif_device_attach - mark device as attached 3162 * @dev: network device 3163 * 3164 * Mark device as attached from system and restart if needed. 3165 */ 3166 void netif_device_attach(struct net_device *dev) 3167 { 3168 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3169 netif_running(dev)) { 3170 netif_tx_wake_all_queues(dev); 3171 __netdev_watchdog_up(dev); 3172 } 3173 } 3174 EXPORT_SYMBOL(netif_device_attach); 3175 3176 /* 3177 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3178 * to be used as a distribution range. 
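 *
 * Illustration (not normative): for a flow hash h and a queue range of
 * qcount queues starting at qoffset, the queue chosen below is
 * reciprocal_scale(h, qcount) + qoffset, i.e. roughly (h * qcount) >> 32
 * moved into the range; with qoffset == 4 and qcount == 4 the result is
 * always one of queues 4..7.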
3179 */ 3180 static u16 skb_tx_hash(const struct net_device *dev, 3181 const struct net_device *sb_dev, 3182 struct sk_buff *skb) 3183 { 3184 u32 hash; 3185 u16 qoffset = 0; 3186 u16 qcount = dev->real_num_tx_queues; 3187 3188 if (dev->num_tc) { 3189 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3190 3191 qoffset = sb_dev->tc_to_txq[tc].offset; 3192 qcount = sb_dev->tc_to_txq[tc].count; 3193 if (unlikely(!qcount)) { 3194 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3195 sb_dev->name, qoffset, tc); 3196 qoffset = 0; 3197 qcount = dev->real_num_tx_queues; 3198 } 3199 } 3200 3201 if (skb_rx_queue_recorded(skb)) { 3202 DEBUG_NET_WARN_ON_ONCE(qcount == 0); 3203 hash = skb_get_rx_queue(skb); 3204 if (hash >= qoffset) 3205 hash -= qoffset; 3206 while (unlikely(hash >= qcount)) 3207 hash -= qcount; 3208 return hash + qoffset; 3209 } 3210 3211 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3212 } 3213 3214 static void skb_warn_bad_offload(const struct sk_buff *skb) 3215 { 3216 static const netdev_features_t null_features; 3217 struct net_device *dev = skb->dev; 3218 const char *name = ""; 3219 3220 if (!net_ratelimit()) 3221 return; 3222 3223 if (dev) { 3224 if (dev->dev.parent) 3225 name = dev_driver_string(dev->dev.parent); 3226 else 3227 name = netdev_name(dev); 3228 } 3229 skb_dump(KERN_WARNING, skb, false); 3230 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3231 name, dev ? &dev->features : &null_features, 3232 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3233 } 3234 3235 /* 3236 * Invalidate hardware checksum when packet is to be mangled, and 3237 * complete checksum manually on outgoing path. 3238 */ 3239 int skb_checksum_help(struct sk_buff *skb) 3240 { 3241 __wsum csum; 3242 int ret = 0, offset; 3243 3244 if (skb->ip_summed == CHECKSUM_COMPLETE) 3245 goto out_set_summed; 3246 3247 if (unlikely(skb_is_gso(skb))) { 3248 skb_warn_bad_offload(skb); 3249 return -EINVAL; 3250 } 3251 3252 /* Before computing a checksum, we should make sure no frag could 3253 * be modified by an external entity : checksum could be wrong. 3254 */ 3255 if (skb_has_shared_frag(skb)) { 3256 ret = __skb_linearize(skb); 3257 if (ret) 3258 goto out; 3259 } 3260 3261 offset = skb_checksum_start_offset(skb); 3262 ret = -EINVAL; 3263 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3264 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3265 goto out; 3266 } 3267 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3268 3269 offset += skb->csum_offset; 3270 if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) { 3271 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); 3272 goto out; 3273 } 3274 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3275 if (ret) 3276 goto out; 3277 3278 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3279 out_set_summed: 3280 skb->ip_summed = CHECKSUM_NONE; 3281 out: 3282 return ret; 3283 } 3284 EXPORT_SYMBOL(skb_checksum_help); 3285 3286 int skb_crc32c_csum_help(struct sk_buff *skb) 3287 { 3288 __le32 crc32c_csum; 3289 int ret = 0, offset, start; 3290 3291 if (skb->ip_summed != CHECKSUM_PARTIAL) 3292 goto out; 3293 3294 if (unlikely(skb_is_gso(skb))) 3295 goto out; 3296 3297 /* Before computing a checksum, we should make sure no frag could 3298 * be modified by an external entity : checksum could be wrong. 
3299 */ 3300 if (unlikely(skb_has_shared_frag(skb))) { 3301 ret = __skb_linearize(skb); 3302 if (ret) 3303 goto out; 3304 } 3305 start = skb_checksum_start_offset(skb); 3306 offset = start + offsetof(struct sctphdr, checksum); 3307 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3308 ret = -EINVAL; 3309 goto out; 3310 } 3311 3312 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3313 if (ret) 3314 goto out; 3315 3316 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3317 skb->len - start, ~(__u32)0, 3318 crc32c_csum_stub)); 3319 *(__le32 *)(skb->data + offset) = crc32c_csum; 3320 skb->ip_summed = CHECKSUM_NONE; 3321 skb->csum_not_inet = 0; 3322 out: 3323 return ret; 3324 } 3325 3326 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3327 { 3328 __be16 type = skb->protocol; 3329 3330 /* Tunnel gso handlers can set protocol to ethernet. */ 3331 if (type == htons(ETH_P_TEB)) { 3332 struct ethhdr *eth; 3333 3334 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3335 return 0; 3336 3337 eth = (struct ethhdr *)skb->data; 3338 type = eth->h_proto; 3339 } 3340 3341 return __vlan_get_protocol(skb, type, depth); 3342 } 3343 3344 /* openvswitch calls this on rx path, so we need a different check. 3345 */ 3346 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3347 { 3348 if (tx_path) 3349 return skb->ip_summed != CHECKSUM_PARTIAL && 3350 skb->ip_summed != CHECKSUM_UNNECESSARY; 3351 3352 return skb->ip_summed == CHECKSUM_NONE; 3353 } 3354 3355 /** 3356 * __skb_gso_segment - Perform segmentation on skb. 3357 * @skb: buffer to segment 3358 * @features: features for the output path (see dev->features) 3359 * @tx_path: whether it is called in TX path 3360 * 3361 * This function segments the given skb and returns a list of segments. 3362 * 3363 * It may return NULL if the skb requires no segmentation. This is 3364 * only possible when GSO is used for verifying header integrity. 3365 * 3366 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. 3367 */ 3368 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3369 netdev_features_t features, bool tx_path) 3370 { 3371 struct sk_buff *segs; 3372 3373 if (unlikely(skb_needs_check(skb, tx_path))) { 3374 int err; 3375 3376 /* We're going to init ->check field in TCP or UDP header */ 3377 err = skb_cow_head(skb, 0); 3378 if (err < 0) 3379 return ERR_PTR(err); 3380 } 3381 3382 /* Only report GSO partial support if it will enable us to 3383 * support segmentation on this frame without needing additional 3384 * work. 3385 */ 3386 if (features & NETIF_F_GSO_PARTIAL) { 3387 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3388 struct net_device *dev = skb->dev; 3389 3390 partial_features |= dev->features & dev->gso_partial_features; 3391 if (!skb_gso_ok(skb, features | partial_features)) 3392 features &= ~NETIF_F_GSO_PARTIAL; 3393 } 3394 3395 BUILD_BUG_ON(SKB_GSO_CB_OFFSET + 3396 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3397 3398 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3399 SKB_GSO_CB(skb)->encap_level = 0; 3400 3401 skb_reset_mac_header(skb); 3402 skb_reset_mac_len(skb); 3403 3404 segs = skb_mac_gso_segment(skb, features); 3405 3406 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3407 skb_warn_bad_offload(skb); 3408 3409 return segs; 3410 } 3411 EXPORT_SYMBOL(__skb_gso_segment); 3412 3413 /* Take action when hardware reception checksum errors are detected. 
*/ 3414 #ifdef CONFIG_BUG 3415 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3416 { 3417 netdev_err(dev, "hw csum failure\n"); 3418 skb_dump(KERN_ERR, skb, true); 3419 dump_stack(); 3420 } 3421 3422 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3423 { 3424 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3425 } 3426 EXPORT_SYMBOL(netdev_rx_csum_fault); 3427 #endif 3428 3429 /* XXX: check that highmem exists at all on the given machine. */ 3430 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3431 { 3432 #ifdef CONFIG_HIGHMEM 3433 int i; 3434 3435 if (!(dev->features & NETIF_F_HIGHDMA)) { 3436 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3437 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3438 3439 if (PageHighMem(skb_frag_page(frag))) 3440 return 1; 3441 } 3442 } 3443 #endif 3444 return 0; 3445 } 3446 3447 /* If MPLS offload request, verify we are testing hardware MPLS features 3448 * instead of standard features for the netdev. 3449 */ 3450 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3451 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3452 netdev_features_t features, 3453 __be16 type) 3454 { 3455 if (eth_p_mpls(type)) 3456 features &= skb->dev->mpls_features; 3457 3458 return features; 3459 } 3460 #else 3461 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3462 netdev_features_t features, 3463 __be16 type) 3464 { 3465 return features; 3466 } 3467 #endif 3468 3469 static netdev_features_t harmonize_features(struct sk_buff *skb, 3470 netdev_features_t features) 3471 { 3472 __be16 type; 3473 3474 type = skb_network_protocol(skb, NULL); 3475 features = net_mpls_features(skb, features, type); 3476 3477 if (skb->ip_summed != CHECKSUM_NONE && 3478 !can_checksum_protocol(features, type)) { 3479 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3480 } 3481 if (illegal_highdma(skb->dev, skb)) 3482 features &= ~NETIF_F_SG; 3483 3484 return features; 3485 } 3486 3487 netdev_features_t passthru_features_check(struct sk_buff *skb, 3488 struct net_device *dev, 3489 netdev_features_t features) 3490 { 3491 return features; 3492 } 3493 EXPORT_SYMBOL(passthru_features_check); 3494 3495 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3496 struct net_device *dev, 3497 netdev_features_t features) 3498 { 3499 return vlan_features_check(skb, features); 3500 } 3501 3502 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3503 struct net_device *dev, 3504 netdev_features_t features) 3505 { 3506 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3507 3508 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3509 return features & ~NETIF_F_GSO_MASK; 3510 3511 if (!skb_shinfo(skb)->gso_type) { 3512 skb_warn_bad_offload(skb); 3513 return features & ~NETIF_F_GSO_MASK; 3514 } 3515 3516 /* Support for GSO partial features requires software 3517 * intervention before we can actually process the packets 3518 * so we need to strip support for any partial features now 3519 * and we can pull them back in after we have partially 3520 * segmented the frame. 3521 */ 3522 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3523 features &= ~dev->gso_partial_features; 3524 3525 /* Make sure to clear the IPv4 ID mangling feature if the 3526 * IPv4 header has the potential to be fragmented. 3527 */ 3528 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3529 struct iphdr *iph = skb->encapsulation ? 
3530 inner_ip_hdr(skb) : ip_hdr(skb); 3531 3532 if (!(iph->frag_off & htons(IP_DF))) 3533 features &= ~NETIF_F_TSO_MANGLEID; 3534 } 3535 3536 return features; 3537 } 3538 3539 netdev_features_t netif_skb_features(struct sk_buff *skb) 3540 { 3541 struct net_device *dev = skb->dev; 3542 netdev_features_t features = dev->features; 3543 3544 if (skb_is_gso(skb)) 3545 features = gso_features_check(skb, dev, features); 3546 3547 /* If encapsulation offload request, verify we are testing 3548 * hardware encapsulation features instead of standard 3549 * features for the netdev 3550 */ 3551 if (skb->encapsulation) 3552 features &= dev->hw_enc_features; 3553 3554 if (skb_vlan_tagged(skb)) 3555 features = netdev_intersect_features(features, 3556 dev->vlan_features | 3557 NETIF_F_HW_VLAN_CTAG_TX | 3558 NETIF_F_HW_VLAN_STAG_TX); 3559 3560 if (dev->netdev_ops->ndo_features_check) 3561 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3562 features); 3563 else 3564 features &= dflt_features_check(skb, dev, features); 3565 3566 return harmonize_features(skb, features); 3567 } 3568 EXPORT_SYMBOL(netif_skb_features); 3569 3570 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3571 struct netdev_queue *txq, bool more) 3572 { 3573 unsigned int len; 3574 int rc; 3575 3576 if (dev_nit_active(dev)) 3577 dev_queue_xmit_nit(skb, dev); 3578 3579 len = skb->len; 3580 trace_net_dev_start_xmit(skb, dev); 3581 rc = netdev_start_xmit(skb, dev, txq, more); 3582 trace_net_dev_xmit(skb, rc, dev, len); 3583 3584 return rc; 3585 } 3586 3587 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3588 struct netdev_queue *txq, int *ret) 3589 { 3590 struct sk_buff *skb = first; 3591 int rc = NETDEV_TX_OK; 3592 3593 while (skb) { 3594 struct sk_buff *next = skb->next; 3595 3596 skb_mark_not_on_list(skb); 3597 rc = xmit_one(skb, dev, txq, next != NULL); 3598 if (unlikely(!dev_xmit_complete(rc))) { 3599 skb->next = next; 3600 goto out; 3601 } 3602 3603 skb = next; 3604 if (netif_tx_queue_stopped(txq) && skb) { 3605 rc = NETDEV_TX_BUSY; 3606 break; 3607 } 3608 } 3609 3610 out: 3611 *ret = rc; 3612 return skb; 3613 } 3614 3615 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3616 netdev_features_t features) 3617 { 3618 if (skb_vlan_tag_present(skb) && 3619 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3620 skb = __vlan_hwaccel_push_inside(skb); 3621 return skb; 3622 } 3623 3624 int skb_csum_hwoffload_help(struct sk_buff *skb, 3625 const netdev_features_t features) 3626 { 3627 if (unlikely(skb_csum_is_sctp(skb))) 3628 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3629 skb_crc32c_csum_help(skb); 3630 3631 if (features & NETIF_F_HW_CSUM) 3632 return 0; 3633 3634 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3635 switch (skb->csum_offset) { 3636 case offsetof(struct tcphdr, check): 3637 case offsetof(struct udphdr, check): 3638 return 0; 3639 } 3640 } 3641 3642 return skb_checksum_help(skb); 3643 } 3644 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3645 3646 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3647 { 3648 netdev_features_t features; 3649 3650 features = netif_skb_features(skb); 3651 skb = validate_xmit_vlan(skb, features); 3652 if (unlikely(!skb)) 3653 goto out_null; 3654 3655 skb = sk_validate_xmit_skb(skb, dev); 3656 if (unlikely(!skb)) 3657 goto out_null; 3658 3659 if (netif_needs_gso(skb, features)) { 3660 struct sk_buff *segs; 3661 3662 segs = skb_gso_segment(skb, features); 3663 if (IS_ERR(segs)) { 3664 goto out_kfree_skb; 3665 } else if (segs) { 3666 consume_skb(skb); 3667 skb = segs; 3668 } 3669 } else { 3670 if (skb_needs_linearize(skb, features) && 3671 __skb_linearize(skb)) 3672 goto out_kfree_skb; 3673 3674 /* If packet is not checksummed and device does not 3675 * support checksumming for this protocol, complete 3676 * checksumming here. 3677 */ 3678 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3679 if (skb->encapsulation) 3680 skb_set_inner_transport_header(skb, 3681 skb_checksum_start_offset(skb)); 3682 else 3683 skb_set_transport_header(skb, 3684 skb_checksum_start_offset(skb)); 3685 if (skb_csum_hwoffload_help(skb, features)) 3686 goto out_kfree_skb; 3687 } 3688 } 3689 3690 skb = validate_xmit_xfrm(skb, features, again); 3691 3692 return skb; 3693 3694 out_kfree_skb: 3695 kfree_skb(skb); 3696 out_null: 3697 dev_core_stats_tx_dropped_inc(dev); 3698 return NULL; 3699 } 3700 3701 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3702 { 3703 struct sk_buff *next, *head = NULL, *tail; 3704 3705 for (; skb != NULL; skb = next) { 3706 next = skb->next; 3707 skb_mark_not_on_list(skb); 3708 3709 /* in case skb wont be segmented, point to itself */ 3710 skb->prev = skb; 3711 3712 skb = validate_xmit_skb(skb, dev, again); 3713 if (!skb) 3714 continue; 3715 3716 if (!head) 3717 head = skb; 3718 else 3719 tail->next = skb; 3720 /* If skb was segmented, skb->prev points to 3721 * the last segment. If not, it still contains skb. 
3722 */ 3723 tail = skb->prev; 3724 } 3725 return head; 3726 } 3727 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3728 3729 static void qdisc_pkt_len_init(struct sk_buff *skb) 3730 { 3731 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3732 3733 qdisc_skb_cb(skb)->pkt_len = skb->len; 3734 3735 /* To get more precise estimation of bytes sent on wire, 3736 * we add to pkt_len the headers size of all segments 3737 */ 3738 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3739 unsigned int hdr_len; 3740 u16 gso_segs = shinfo->gso_segs; 3741 3742 /* mac layer + network layer */ 3743 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3744 3745 /* + transport layer */ 3746 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3747 const struct tcphdr *th; 3748 struct tcphdr _tcphdr; 3749 3750 th = skb_header_pointer(skb, skb_transport_offset(skb), 3751 sizeof(_tcphdr), &_tcphdr); 3752 if (likely(th)) 3753 hdr_len += __tcp_hdrlen(th); 3754 } else { 3755 struct udphdr _udphdr; 3756 3757 if (skb_header_pointer(skb, skb_transport_offset(skb), 3758 sizeof(_udphdr), &_udphdr)) 3759 hdr_len += sizeof(struct udphdr); 3760 } 3761 3762 if (shinfo->gso_type & SKB_GSO_DODGY) 3763 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3764 shinfo->gso_size); 3765 3766 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3767 } 3768 } 3769 3770 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3771 struct sk_buff **to_free, 3772 struct netdev_queue *txq) 3773 { 3774 int rc; 3775 3776 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3777 if (rc == NET_XMIT_SUCCESS) 3778 trace_qdisc_enqueue(q, txq, skb); 3779 return rc; 3780 } 3781 3782 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3783 struct net_device *dev, 3784 struct netdev_queue *txq) 3785 { 3786 spinlock_t *root_lock = qdisc_lock(q); 3787 struct sk_buff *to_free = NULL; 3788 bool contended; 3789 int rc; 3790 3791 qdisc_calculate_pkt_len(skb, q); 3792 3793 if (q->flags & TCQ_F_NOLOCK) { 3794 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3795 qdisc_run_begin(q)) { 3796 /* Retest nolock_qdisc_is_empty() within the protection 3797 * of q->seqlock to protect from racing with requeuing. 3798 */ 3799 if (unlikely(!nolock_qdisc_is_empty(q))) { 3800 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3801 __qdisc_run(q); 3802 qdisc_run_end(q); 3803 3804 goto no_lock_out; 3805 } 3806 3807 qdisc_bstats_cpu_update(q, skb); 3808 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3809 !nolock_qdisc_is_empty(q)) 3810 __qdisc_run(q); 3811 3812 qdisc_run_end(q); 3813 return NET_XMIT_SUCCESS; 3814 } 3815 3816 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3817 qdisc_run(q); 3818 3819 no_lock_out: 3820 if (unlikely(to_free)) 3821 kfree_skb_list_reason(to_free, 3822 SKB_DROP_REASON_QDISC_DROP); 3823 return rc; 3824 } 3825 3826 /* 3827 * Heuristic to force contended enqueues to serialize on a 3828 * separate lock before trying to get qdisc main lock. 3829 * This permits qdisc->running owner to get the lock more 3830 * often and dequeue packets faster. 3831 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 3832 * and then other tasks will only enqueue packets. The packets will be 3833 * sent after the qdisc owner is scheduled again. To prevent this 3834 * scenario the task always serialize on the lock. 
3835 */ 3836 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 3837 if (unlikely(contended)) 3838 spin_lock(&q->busylock); 3839 3840 spin_lock(root_lock); 3841 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3842 __qdisc_drop(skb, &to_free); 3843 rc = NET_XMIT_DROP; 3844 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3845 qdisc_run_begin(q)) { 3846 /* 3847 * This is a work-conserving queue; there are no old skbs 3848 * waiting to be sent out; and the qdisc is not running - 3849 * xmit the skb directly. 3850 */ 3851 3852 qdisc_bstats_update(q, skb); 3853 3854 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3855 if (unlikely(contended)) { 3856 spin_unlock(&q->busylock); 3857 contended = false; 3858 } 3859 __qdisc_run(q); 3860 } 3861 3862 qdisc_run_end(q); 3863 rc = NET_XMIT_SUCCESS; 3864 } else { 3865 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3866 if (qdisc_run_begin(q)) { 3867 if (unlikely(contended)) { 3868 spin_unlock(&q->busylock); 3869 contended = false; 3870 } 3871 __qdisc_run(q); 3872 qdisc_run_end(q); 3873 } 3874 } 3875 spin_unlock(root_lock); 3876 if (unlikely(to_free)) 3877 kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP); 3878 if (unlikely(contended)) 3879 spin_unlock(&q->busylock); 3880 return rc; 3881 } 3882 3883 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3884 static void skb_update_prio(struct sk_buff *skb) 3885 { 3886 const struct netprio_map *map; 3887 const struct sock *sk; 3888 unsigned int prioidx; 3889 3890 if (skb->priority) 3891 return; 3892 map = rcu_dereference_bh(skb->dev->priomap); 3893 if (!map) 3894 return; 3895 sk = skb_to_full_sk(skb); 3896 if (!sk) 3897 return; 3898 3899 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3900 3901 if (prioidx < map->priomap_len) 3902 skb->priority = map->priomap[prioidx]; 3903 } 3904 #else 3905 #define skb_update_prio(skb) 3906 #endif 3907 3908 /** 3909 * dev_loopback_xmit - loop back @skb 3910 * @net: network namespace this loopback is happening in 3911 * @sk: sk needed to be a netfilter okfn 3912 * @skb: buffer to transmit 3913 */ 3914 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3915 { 3916 skb_reset_mac_header(skb); 3917 __skb_pull(skb, skb_network_offset(skb)); 3918 skb->pkt_type = PACKET_LOOPBACK; 3919 if (skb->ip_summed == CHECKSUM_NONE) 3920 skb->ip_summed = CHECKSUM_UNNECESSARY; 3921 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb)); 3922 skb_dst_force(skb); 3923 netif_rx(skb); 3924 return 0; 3925 } 3926 EXPORT_SYMBOL(dev_loopback_xmit); 3927 3928 #ifdef CONFIG_NET_EGRESS 3929 static struct sk_buff * 3930 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3931 { 3932 #ifdef CONFIG_NET_CLS_ACT 3933 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3934 struct tcf_result cl_res; 3935 3936 if (!miniq) 3937 return skb; 3938 3939 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. 
*/ 3940 tc_skb_cb(skb)->mru = 0; 3941 tc_skb_cb(skb)->post_ct = false; 3942 mini_qdisc_bstats_cpu_update(miniq, skb); 3943 3944 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 3945 case TC_ACT_OK: 3946 case TC_ACT_RECLASSIFY: 3947 skb->tc_index = TC_H_MIN(cl_res.classid); 3948 break; 3949 case TC_ACT_SHOT: 3950 mini_qdisc_qstats_cpu_drop(miniq); 3951 *ret = NET_XMIT_DROP; 3952 kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS); 3953 return NULL; 3954 case TC_ACT_STOLEN: 3955 case TC_ACT_QUEUED: 3956 case TC_ACT_TRAP: 3957 *ret = NET_XMIT_SUCCESS; 3958 consume_skb(skb); 3959 return NULL; 3960 case TC_ACT_REDIRECT: 3961 /* No need to push/pop skb's mac_header here on egress! */ 3962 skb_do_redirect(skb); 3963 *ret = NET_XMIT_SUCCESS; 3964 return NULL; 3965 default: 3966 break; 3967 } 3968 #endif /* CONFIG_NET_CLS_ACT */ 3969 3970 return skb; 3971 } 3972 3973 static struct netdev_queue * 3974 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) 3975 { 3976 int qm = skb_get_queue_mapping(skb); 3977 3978 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 3979 } 3980 3981 static bool netdev_xmit_txqueue_skipped(void) 3982 { 3983 return __this_cpu_read(softnet_data.xmit.skip_txqueue); 3984 } 3985 3986 void netdev_xmit_skip_txqueue(bool skip) 3987 { 3988 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 3989 } 3990 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3991 #endif /* CONFIG_NET_EGRESS */ 3992 3993 #ifdef CONFIG_XPS 3994 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3995 struct xps_dev_maps *dev_maps, unsigned int tci) 3996 { 3997 int tc = netdev_get_prio_tc_map(dev, skb->priority); 3998 struct xps_map *map; 3999 int queue_index = -1; 4000 4001 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 4002 return queue_index; 4003 4004 tci *= dev_maps->num_tc; 4005 tci += tc; 4006 4007 map = rcu_dereference(dev_maps->attr_map[tci]); 4008 if (map) { 4009 if (map->len == 1) 4010 queue_index = map->queues[0]; 4011 else 4012 queue_index = map->queues[reciprocal_scale( 4013 skb_get_hash(skb), map->len)]; 4014 if (unlikely(queue_index >= dev->real_num_tx_queues)) 4015 queue_index = -1; 4016 } 4017 return queue_index; 4018 } 4019 #endif 4020 4021 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 4022 struct sk_buff *skb) 4023 { 4024 #ifdef CONFIG_XPS 4025 struct xps_dev_maps *dev_maps; 4026 struct sock *sk = skb->sk; 4027 int queue_index = -1; 4028 4029 if (!static_key_false(&xps_needed)) 4030 return -1; 4031 4032 rcu_read_lock(); 4033 if (!static_key_false(&xps_rxqs_needed)) 4034 goto get_cpus_map; 4035 4036 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4037 if (dev_maps) { 4038 int tci = sk_rx_queue_get(sk); 4039 4040 if (tci >= 0) 4041 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4042 tci); 4043 } 4044 4045 get_cpus_map: 4046 if (queue_index < 0) { 4047 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4048 if (dev_maps) { 4049 unsigned int tci = skb->sender_cpu - 1; 4050 4051 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4052 tci); 4053 } 4054 } 4055 rcu_read_unlock(); 4056 4057 return queue_index; 4058 #else 4059 return -1; 4060 #endif 4061 } 4062 4063 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4064 struct net_device *sb_dev) 4065 { 4066 return 0; 4067 } 4068 EXPORT_SYMBOL(dev_pick_tx_zero); 4069 4070 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 4071 struct net_device *sb_dev) 4072 { 4073 return 
(u16)raw_smp_processor_id() % dev->real_num_tx_queues; 4074 } 4075 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 4076 4077 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4078 struct net_device *sb_dev) 4079 { 4080 struct sock *sk = skb->sk; 4081 int queue_index = sk_tx_queue_get(sk); 4082 4083 sb_dev = sb_dev ? : dev; 4084 4085 if (queue_index < 0 || skb->ooo_okay || 4086 queue_index >= dev->real_num_tx_queues) { 4087 int new_index = get_xps_queue(dev, sb_dev, skb); 4088 4089 if (new_index < 0) 4090 new_index = skb_tx_hash(dev, sb_dev, skb); 4091 4092 if (queue_index != new_index && sk && 4093 sk_fullsock(sk) && 4094 rcu_access_pointer(sk->sk_dst_cache)) 4095 sk_tx_queue_set(sk, new_index); 4096 4097 queue_index = new_index; 4098 } 4099 4100 return queue_index; 4101 } 4102 EXPORT_SYMBOL(netdev_pick_tx); 4103 4104 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 4105 struct sk_buff *skb, 4106 struct net_device *sb_dev) 4107 { 4108 int queue_index = 0; 4109 4110 #ifdef CONFIG_XPS 4111 u32 sender_cpu = skb->sender_cpu - 1; 4112 4113 if (sender_cpu >= (u32)NR_CPUS) 4114 skb->sender_cpu = raw_smp_processor_id() + 1; 4115 #endif 4116 4117 if (dev->real_num_tx_queues != 1) { 4118 const struct net_device_ops *ops = dev->netdev_ops; 4119 4120 if (ops->ndo_select_queue) 4121 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4122 else 4123 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4124 4125 queue_index = netdev_cap_txqueue(dev, queue_index); 4126 } 4127 4128 skb_set_queue_mapping(skb, queue_index); 4129 return netdev_get_tx_queue(dev, queue_index); 4130 } 4131 4132 /** 4133 * __dev_queue_xmit() - transmit a buffer 4134 * @skb: buffer to transmit 4135 * @sb_dev: suboordinate device used for L2 forwarding offload 4136 * 4137 * Queue a buffer for transmission to a network device. The caller must 4138 * have set the device and priority and built the buffer before calling 4139 * this function. The function can be called from an interrupt. 4140 * 4141 * When calling this method, interrupts MUST be enabled. This is because 4142 * the BH enable code must have IRQs enabled so that it will not deadlock. 4143 * 4144 * Regardless of the return value, the skb is consumed, so it is currently 4145 * difficult to retry a send to this method. (You can bump the ref count 4146 * before sending to hold a reference for retry if you are careful.) 4147 * 4148 * Return: 4149 * * 0 - buffer successfully transmitted 4150 * * positive qdisc return code - NET_XMIT_DROP etc. 4151 * * negative errno - other errors 4152 */ 4153 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 4154 { 4155 struct net_device *dev = skb->dev; 4156 struct netdev_queue *txq = NULL; 4157 struct Qdisc *q; 4158 int rc = -ENOMEM; 4159 bool again = false; 4160 4161 skb_reset_mac_header(skb); 4162 skb_assert_len(skb); 4163 4164 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 4165 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); 4166 4167 /* Disable soft irqs for various locks below. Also 4168 * stops preemption for RCU. 
4169 */ 4170 rcu_read_lock_bh(); 4171 4172 skb_update_prio(skb); 4173 4174 qdisc_pkt_len_init(skb); 4175 #ifdef CONFIG_NET_CLS_ACT 4176 skb->tc_at_ingress = 0; 4177 #endif 4178 #ifdef CONFIG_NET_EGRESS 4179 if (static_branch_unlikely(&egress_needed_key)) { 4180 if (nf_hook_egress_active()) { 4181 skb = nf_hook_egress(skb, &rc, dev); 4182 if (!skb) 4183 goto out; 4184 } 4185 4186 netdev_xmit_skip_txqueue(false); 4187 4188 nf_skip_egress(skb, true); 4189 skb = sch_handle_egress(skb, &rc, dev); 4190 if (!skb) 4191 goto out; 4192 nf_skip_egress(skb, false); 4193 4194 if (netdev_xmit_txqueue_skipped()) 4195 txq = netdev_tx_queue_mapping(dev, skb); 4196 } 4197 #endif 4198 /* If device/qdisc don't need skb->dst, release it right now while 4199 * it's hot in this cpu cache. 4200 */ 4201 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 4202 skb_dst_drop(skb); 4203 else 4204 skb_dst_force(skb); 4205 4206 if (!txq) 4207 txq = netdev_core_pick_tx(dev, skb, sb_dev); 4208 4209 q = rcu_dereference_bh(txq->qdisc); 4210 4211 trace_net_dev_queue(skb); 4212 if (q->enqueue) { 4213 rc = __dev_xmit_skb(skb, q, dev, txq); 4214 goto out; 4215 } 4216 4217 /* The device has no queue. Common case for software devices: 4218 * loopback, all sorts of tunnels... 4219 4220 * Really, it is unlikely that netif_tx_lock protection is necessary 4221 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics 4222 * counters.) 4223 * However, it is possible that they rely on the protection 4224 * made by us here. 4225 4226 * Check this and shoot the lock. It is not prone to deadlocks. 4227 * Or shoot the noqueue qdisc instead, it is even simpler 8) 4228 */ 4229 if (dev->flags & IFF_UP) { 4230 int cpu = smp_processor_id(); /* ok because BHs are off */ 4231 4232 /* Other cpus might concurrently change txq->xmit_lock_owner 4233 * to -1 or to their cpu id, but not to our id. 4234 */ 4235 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { 4236 if (dev_xmit_recursion()) 4237 goto recursion_alert; 4238 4239 skb = validate_xmit_skb(skb, dev, &again); 4240 if (!skb) 4241 goto out; 4242 4243 HARD_TX_LOCK(dev, txq, cpu); 4244 4245 if (!netif_xmit_stopped(txq)) { 4246 dev_xmit_recursion_inc(); 4247 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4248 dev_xmit_recursion_dec(); 4249 if (dev_xmit_complete(rc)) { 4250 HARD_TX_UNLOCK(dev, txq); 4251 goto out; 4252 } 4253 } 4254 HARD_TX_UNLOCK(dev, txq); 4255 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4256 dev->name); 4257 } else { 4258 /* Recursion is detected!
It is possible, 4259 * unfortunately 4260 */ 4261 recursion_alert: 4262 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4263 dev->name); 4264 } 4265 } 4266 4267 rc = -ENETDOWN; 4268 rcu_read_unlock_bh(); 4269 4270 dev_core_stats_tx_dropped_inc(dev); 4271 kfree_skb_list(skb); 4272 return rc; 4273 out: 4274 rcu_read_unlock_bh(); 4275 return rc; 4276 } 4277 EXPORT_SYMBOL(__dev_queue_xmit); 4278 4279 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4280 { 4281 struct net_device *dev = skb->dev; 4282 struct sk_buff *orig_skb = skb; 4283 struct netdev_queue *txq; 4284 int ret = NETDEV_TX_BUSY; 4285 bool again = false; 4286 4287 if (unlikely(!netif_running(dev) || 4288 !netif_carrier_ok(dev))) 4289 goto drop; 4290 4291 skb = validate_xmit_skb_list(skb, dev, &again); 4292 if (skb != orig_skb) 4293 goto drop; 4294 4295 skb_set_queue_mapping(skb, queue_id); 4296 txq = skb_get_tx_queue(dev, skb); 4297 4298 local_bh_disable(); 4299 4300 dev_xmit_recursion_inc(); 4301 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4302 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4303 ret = netdev_start_xmit(skb, dev, txq, false); 4304 HARD_TX_UNLOCK(dev, txq); 4305 dev_xmit_recursion_dec(); 4306 4307 local_bh_enable(); 4308 return ret; 4309 drop: 4310 dev_core_stats_tx_dropped_inc(dev); 4311 kfree_skb_list(skb); 4312 return NET_XMIT_DROP; 4313 } 4314 EXPORT_SYMBOL(__dev_direct_xmit); 4315 4316 /************************************************************************* 4317 * Receiver routines 4318 *************************************************************************/ 4319 4320 int netdev_max_backlog __read_mostly = 1000; 4321 EXPORT_SYMBOL(netdev_max_backlog); 4322 4323 int netdev_tstamp_prequeue __read_mostly = 1; 4324 unsigned int sysctl_skb_defer_max __read_mostly = 64; 4325 int netdev_budget __read_mostly = 300; 4326 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4327 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4328 int weight_p __read_mostly = 64; /* old backlog weight */ 4329 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4330 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4331 int dev_rx_weight __read_mostly = 64; 4332 int dev_tx_weight __read_mostly = 64; 4333 4334 /* Called with irq disabled */ 4335 static inline void ____napi_schedule(struct softnet_data *sd, 4336 struct napi_struct *napi) 4337 { 4338 struct task_struct *thread; 4339 4340 lockdep_assert_irqs_disabled(); 4341 4342 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4343 /* Paired with smp_mb__before_atomic() in 4344 * napi_enable()/dev_set_threaded(). 4345 * Use READ_ONCE() to guarantee a complete 4346 * read on napi->thread. Only call 4347 * wake_up_process() when it's not NULL. 4348 */ 4349 thread = READ_ONCE(napi->thread); 4350 if (thread) { 4351 /* Avoid doing set_bit() if the thread is in 4352 * INTERRUPTIBLE state, cause napi_thread_wait() 4353 * makes sure to proceed with napi polling 4354 * if the thread is explicitly woken from here. 4355 */ 4356 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE) 4357 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4358 wake_up_process(thread); 4359 return; 4360 } 4361 } 4362 4363 list_add_tail(&napi->poll_list, &sd->poll_list); 4364 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4365 } 4366 4367 #ifdef CONFIG_RPS 4368 4369 /* One global table that all flow-based protocols share. 
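 * Each entry packs the upper bits of the flow hash together with the CPU
 * that last processed the flow (the low bits covered by rps_cpu_mask);
 * get_rps_cpu() below relies on this layout when matching flows.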
*/ 4370 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4371 EXPORT_SYMBOL(rps_sock_flow_table); 4372 u32 rps_cpu_mask __read_mostly; 4373 EXPORT_SYMBOL(rps_cpu_mask); 4374 4375 struct static_key_false rps_needed __read_mostly; 4376 EXPORT_SYMBOL(rps_needed); 4377 struct static_key_false rfs_needed __read_mostly; 4378 EXPORT_SYMBOL(rfs_needed); 4379 4380 static struct rps_dev_flow * 4381 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4382 struct rps_dev_flow *rflow, u16 next_cpu) 4383 { 4384 if (next_cpu < nr_cpu_ids) { 4385 #ifdef CONFIG_RFS_ACCEL 4386 struct netdev_rx_queue *rxqueue; 4387 struct rps_dev_flow_table *flow_table; 4388 struct rps_dev_flow *old_rflow; 4389 u32 flow_id; 4390 u16 rxq_index; 4391 int rc; 4392 4393 /* Should we steer this flow to a different hardware queue? */ 4394 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4395 !(dev->features & NETIF_F_NTUPLE)) 4396 goto out; 4397 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4398 if (rxq_index == skb_get_rx_queue(skb)) 4399 goto out; 4400 4401 rxqueue = dev->_rx + rxq_index; 4402 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4403 if (!flow_table) 4404 goto out; 4405 flow_id = skb_get_hash(skb) & flow_table->mask; 4406 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4407 rxq_index, flow_id); 4408 if (rc < 0) 4409 goto out; 4410 old_rflow = rflow; 4411 rflow = &flow_table->flows[flow_id]; 4412 rflow->filter = rc; 4413 if (old_rflow->filter == rflow->filter) 4414 old_rflow->filter = RPS_NO_FILTER; 4415 out: 4416 #endif 4417 rflow->last_qtail = 4418 per_cpu(softnet_data, next_cpu).input_queue_head; 4419 } 4420 4421 rflow->cpu = next_cpu; 4422 return rflow; 4423 } 4424 4425 /* 4426 * get_rps_cpu is called from netif_receive_skb and returns the target 4427 * CPU from the RPS map of the receiving queue for a given skb. 4428 * rcu_read_lock must be held on entry. 
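 * Returns the selected CPU, or -1 when no CPU could be chosen, in which
 * case the caller keeps the packet on the local CPU.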
4429 */ 4430 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4431 struct rps_dev_flow **rflowp) 4432 { 4433 const struct rps_sock_flow_table *sock_flow_table; 4434 struct netdev_rx_queue *rxqueue = dev->_rx; 4435 struct rps_dev_flow_table *flow_table; 4436 struct rps_map *map; 4437 int cpu = -1; 4438 u32 tcpu; 4439 u32 hash; 4440 4441 if (skb_rx_queue_recorded(skb)) { 4442 u16 index = skb_get_rx_queue(skb); 4443 4444 if (unlikely(index >= dev->real_num_rx_queues)) { 4445 WARN_ONCE(dev->real_num_rx_queues > 1, 4446 "%s received packet on queue %u, but number " 4447 "of RX queues is %u\n", 4448 dev->name, index, dev->real_num_rx_queues); 4449 goto done; 4450 } 4451 rxqueue += index; 4452 } 4453 4454 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4455 4456 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4457 map = rcu_dereference(rxqueue->rps_map); 4458 if (!flow_table && !map) 4459 goto done; 4460 4461 skb_reset_network_header(skb); 4462 hash = skb_get_hash(skb); 4463 if (!hash) 4464 goto done; 4465 4466 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4467 if (flow_table && sock_flow_table) { 4468 struct rps_dev_flow *rflow; 4469 u32 next_cpu; 4470 u32 ident; 4471 4472 /* First check into global flow table if there is a match */ 4473 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4474 if ((ident ^ hash) & ~rps_cpu_mask) 4475 goto try_rps; 4476 4477 next_cpu = ident & rps_cpu_mask; 4478 4479 /* OK, now we know there is a match, 4480 * we can look at the local (per receive queue) flow table 4481 */ 4482 rflow = &flow_table->flows[hash & flow_table->mask]; 4483 tcpu = rflow->cpu; 4484 4485 /* 4486 * If the desired CPU (where last recvmsg was done) is 4487 * different from current CPU (one in the rx-queue flow 4488 * table entry), switch if one of the following holds: 4489 * - Current CPU is unset (>= nr_cpu_ids). 4490 * - Current CPU is offline. 4491 * - The current CPU's queue tail has advanced beyond the 4492 * last packet that was enqueued using this table entry. 4493 * This guarantees that all previous packets for the flow 4494 * have been dequeued, thus preserving in order delivery. 4495 */ 4496 if (unlikely(tcpu != next_cpu) && 4497 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4498 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4499 rflow->last_qtail)) >= 0)) { 4500 tcpu = next_cpu; 4501 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4502 } 4503 4504 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4505 *rflowp = rflow; 4506 cpu = tcpu; 4507 goto done; 4508 } 4509 } 4510 4511 try_rps: 4512 4513 if (map) { 4514 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4515 if (cpu_online(tcpu)) { 4516 cpu = tcpu; 4517 goto done; 4518 } 4519 } 4520 4521 done: 4522 return cpu; 4523 } 4524 4525 #ifdef CONFIG_RFS_ACCEL 4526 4527 /** 4528 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4529 * @dev: Device on which the filter was set 4530 * @rxq_index: RX queue index 4531 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4532 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4533 * 4534 * Drivers that implement ndo_rx_flow_steer() should periodically call 4535 * this function for each installed filter and remove the filters for 4536 * which it returns %true. 
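 *
 * A minimal sketch of such an expiry scan (my_nfilters, my_rxq[] and
 * my_flow_id[] are hypothetical driver state, not part of this API):
 *
 *	for (i = 0; i < my_nfilters; i++)
 *		if (rps_may_expire_flow(dev, my_rxq[i], my_flow_id[i], i))
 *			my_remove_hw_filter(dev, i);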
4537 */ 4538 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4539 u32 flow_id, u16 filter_id) 4540 { 4541 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4542 struct rps_dev_flow_table *flow_table; 4543 struct rps_dev_flow *rflow; 4544 bool expire = true; 4545 unsigned int cpu; 4546 4547 rcu_read_lock(); 4548 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4549 if (flow_table && flow_id <= flow_table->mask) { 4550 rflow = &flow_table->flows[flow_id]; 4551 cpu = READ_ONCE(rflow->cpu); 4552 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4553 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4554 rflow->last_qtail) < 4555 (int)(10 * flow_table->mask))) 4556 expire = false; 4557 } 4558 rcu_read_unlock(); 4559 return expire; 4560 } 4561 EXPORT_SYMBOL(rps_may_expire_flow); 4562 4563 #endif /* CONFIG_RFS_ACCEL */ 4564 4565 /* Called from hardirq (IPI) context */ 4566 static void rps_trigger_softirq(void *data) 4567 { 4568 struct softnet_data *sd = data; 4569 4570 ____napi_schedule(sd, &sd->backlog); 4571 sd->received_rps++; 4572 } 4573 4574 #endif /* CONFIG_RPS */ 4575 4576 /* Called from hardirq (IPI) context */ 4577 static void trigger_rx_softirq(void *data) 4578 { 4579 struct softnet_data *sd = data; 4580 4581 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4582 smp_store_release(&sd->defer_ipi_scheduled, 0); 4583 } 4584 4585 /* 4586 * Check if this softnet_data structure is another cpu one 4587 * If yes, queue it to our IPI list and return 1 4588 * If no, return 0 4589 */ 4590 static int napi_schedule_rps(struct softnet_data *sd) 4591 { 4592 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4593 4594 #ifdef CONFIG_RPS 4595 if (sd != mysd) { 4596 sd->rps_ipi_next = mysd->rps_ipi_list; 4597 mysd->rps_ipi_list = sd; 4598 4599 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4600 return 1; 4601 } 4602 #endif /* CONFIG_RPS */ 4603 __napi_schedule_irqoff(&mysd->backlog); 4604 return 0; 4605 } 4606 4607 #ifdef CONFIG_NET_FLOW_LIMIT 4608 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4609 #endif 4610 4611 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4612 { 4613 #ifdef CONFIG_NET_FLOW_LIMIT 4614 struct sd_flow_limit *fl; 4615 struct softnet_data *sd; 4616 unsigned int old_flow, new_flow; 4617 4618 if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) 4619 return false; 4620 4621 sd = this_cpu_ptr(&softnet_data); 4622 4623 rcu_read_lock(); 4624 fl = rcu_dereference(sd->flow_limit); 4625 if (fl) { 4626 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4627 old_flow = fl->history[fl->history_head]; 4628 fl->history[fl->history_head] = new_flow; 4629 4630 fl->history_head++; 4631 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4632 4633 if (likely(fl->buckets[old_flow])) 4634 fl->buckets[old_flow]--; 4635 4636 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4637 fl->count++; 4638 rcu_read_unlock(); 4639 return true; 4640 } 4641 } 4642 rcu_read_unlock(); 4643 #endif 4644 return false; 4645 } 4646 4647 /* 4648 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4649 * queue (may be a remote CPU queue). 
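 * The skb is dropped and NET_RX_DROP returned when the backlog already
 * exceeds netdev_max_backlog, the flow limit fires, or the target device
 * is no longer running.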
4650 */ 4651 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4652 unsigned int *qtail) 4653 { 4654 enum skb_drop_reason reason; 4655 struct softnet_data *sd; 4656 unsigned long flags; 4657 unsigned int qlen; 4658 4659 reason = SKB_DROP_REASON_NOT_SPECIFIED; 4660 sd = &per_cpu(softnet_data, cpu); 4661 4662 rps_lock_irqsave(sd, &flags); 4663 if (!netif_running(skb->dev)) 4664 goto drop; 4665 qlen = skb_queue_len(&sd->input_pkt_queue); 4666 if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { 4667 if (qlen) { 4668 enqueue: 4669 __skb_queue_tail(&sd->input_pkt_queue, skb); 4670 input_queue_tail_incr_save(sd, qtail); 4671 rps_unlock_irq_restore(sd, &flags); 4672 return NET_RX_SUCCESS; 4673 } 4674 4675 /* Schedule NAPI for backlog device 4676 * We can use non atomic operation since we own the queue lock 4677 */ 4678 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 4679 napi_schedule_rps(sd); 4680 goto enqueue; 4681 } 4682 reason = SKB_DROP_REASON_CPU_BACKLOG; 4683 4684 drop: 4685 sd->dropped++; 4686 rps_unlock_irq_restore(sd, &flags); 4687 4688 dev_core_stats_rx_dropped_inc(skb->dev); 4689 kfree_skb_reason(skb, reason); 4690 return NET_RX_DROP; 4691 } 4692 4693 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4694 { 4695 struct net_device *dev = skb->dev; 4696 struct netdev_rx_queue *rxqueue; 4697 4698 rxqueue = dev->_rx; 4699 4700 if (skb_rx_queue_recorded(skb)) { 4701 u16 index = skb_get_rx_queue(skb); 4702 4703 if (unlikely(index >= dev->real_num_rx_queues)) { 4704 WARN_ONCE(dev->real_num_rx_queues > 1, 4705 "%s received packet on queue %u, but number " 4706 "of RX queues is %u\n", 4707 dev->name, index, dev->real_num_rx_queues); 4708 4709 return rxqueue; /* Return first rxqueue */ 4710 } 4711 rxqueue += index; 4712 } 4713 return rxqueue; 4714 } 4715 4716 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4717 struct bpf_prog *xdp_prog) 4718 { 4719 void *orig_data, *orig_data_end, *hard_start; 4720 struct netdev_rx_queue *rxqueue; 4721 bool orig_bcast, orig_host; 4722 u32 mac_len, frame_sz; 4723 __be16 orig_eth_type; 4724 struct ethhdr *eth; 4725 u32 metalen, act; 4726 int off; 4727 4728 /* The XDP program wants to see the packet starting at the MAC 4729 * header. 
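 * The xdp_buff below is therefore built over the skb head starting at the
 * MAC header, and any head/tail/metadata adjustments made by the program
 * are translated back onto the skb afterwards.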
4730 */ 4731 mac_len = skb->data - skb_mac_header(skb); 4732 hard_start = skb->data - skb_headroom(skb); 4733 4734 /* SKB "head" area always have tailroom for skb_shared_info */ 4735 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4736 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4737 4738 rxqueue = netif_get_rxqueue(skb); 4739 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4740 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4741 skb_headlen(skb) + mac_len, true); 4742 4743 orig_data_end = xdp->data_end; 4744 orig_data = xdp->data; 4745 eth = (struct ethhdr *)xdp->data; 4746 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4747 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4748 orig_eth_type = eth->h_proto; 4749 4750 act = bpf_prog_run_xdp(xdp_prog, xdp); 4751 4752 /* check if bpf_xdp_adjust_head was used */ 4753 off = xdp->data - orig_data; 4754 if (off) { 4755 if (off > 0) 4756 __skb_pull(skb, off); 4757 else if (off < 0) 4758 __skb_push(skb, -off); 4759 4760 skb->mac_header += off; 4761 skb_reset_network_header(skb); 4762 } 4763 4764 /* check if bpf_xdp_adjust_tail was used */ 4765 off = xdp->data_end - orig_data_end; 4766 if (off != 0) { 4767 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4768 skb->len += off; /* positive on grow, negative on shrink */ 4769 } 4770 4771 /* check if XDP changed eth hdr such SKB needs update */ 4772 eth = (struct ethhdr *)xdp->data; 4773 if ((orig_eth_type != eth->h_proto) || 4774 (orig_host != ether_addr_equal_64bits(eth->h_dest, 4775 skb->dev->dev_addr)) || 4776 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4777 __skb_push(skb, ETH_HLEN); 4778 skb->pkt_type = PACKET_HOST; 4779 skb->protocol = eth_type_trans(skb, skb->dev); 4780 } 4781 4782 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4783 * before calling us again on redirect path. We do not call do_redirect 4784 * as we leave that up to the caller. 4785 * 4786 * Caller is responsible for managing lifetime of skb (i.e. calling 4787 * kfree_skb in response to actions it cannot handle/XDP_DROP). 4788 */ 4789 switch (act) { 4790 case XDP_REDIRECT: 4791 case XDP_TX: 4792 __skb_push(skb, mac_len); 4793 break; 4794 case XDP_PASS: 4795 metalen = xdp->data - xdp->data_meta; 4796 if (metalen) 4797 skb_metadata_set(skb, metalen); 4798 break; 4799 } 4800 4801 return act; 4802 } 4803 4804 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4805 struct xdp_buff *xdp, 4806 struct bpf_prog *xdp_prog) 4807 { 4808 u32 act = XDP_DROP; 4809 4810 /* Reinjected packets coming from act_mirred or similar should 4811 * not get XDP generic processing. 4812 */ 4813 if (skb_is_redirected(skb)) 4814 return XDP_PASS; 4815 4816 /* XDP packets must be linear and must have sufficient headroom 4817 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4818 * native XDP provides, thus we need to do it here as well. 4819 */ 4820 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4821 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4822 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4823 int troom = skb->tail + skb->data_len - skb->end; 4824 4825 /* In case we have to go down the path and also linearize, 4826 * then lets do the pskb_expand_head() work just once here. 4827 */ 4828 if (pskb_expand_head(skb, 4829 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4830 troom > 0 ? 
troom + 128 : 0, GFP_ATOMIC)) 4831 goto do_drop; 4832 if (skb_linearize(skb)) 4833 goto do_drop; 4834 } 4835 4836 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4837 switch (act) { 4838 case XDP_REDIRECT: 4839 case XDP_TX: 4840 case XDP_PASS: 4841 break; 4842 default: 4843 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act); 4844 fallthrough; 4845 case XDP_ABORTED: 4846 trace_xdp_exception(skb->dev, xdp_prog, act); 4847 fallthrough; 4848 case XDP_DROP: 4849 do_drop: 4850 kfree_skb(skb); 4851 break; 4852 } 4853 4854 return act; 4855 } 4856 4857 /* When doing generic XDP we have to bypass the qdisc layer and the 4858 * network taps in order to match in-driver-XDP behavior. This also means 4859 * that XDP packets are able to starve other packets going through a qdisc, 4860 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX 4861 * queues, so they do not have this starvation issue. 4862 */ 4863 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4864 { 4865 struct net_device *dev = skb->dev; 4866 struct netdev_queue *txq; 4867 bool free_skb = true; 4868 int cpu, rc; 4869 4870 txq = netdev_core_pick_tx(dev, skb, NULL); 4871 cpu = smp_processor_id(); 4872 HARD_TX_LOCK(dev, txq, cpu); 4873 if (!netif_xmit_frozen_or_drv_stopped(txq)) { 4874 rc = netdev_start_xmit(skb, dev, txq, 0); 4875 if (dev_xmit_complete(rc)) 4876 free_skb = false; 4877 } 4878 HARD_TX_UNLOCK(dev, txq); 4879 if (free_skb) { 4880 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4881 dev_core_stats_tx_dropped_inc(dev); 4882 kfree_skb(skb); 4883 } 4884 } 4885 4886 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4887 4888 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4889 { 4890 if (xdp_prog) { 4891 struct xdp_buff xdp; 4892 u32 act; 4893 int err; 4894 4895 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4896 if (act != XDP_PASS) { 4897 switch (act) { 4898 case XDP_REDIRECT: 4899 err = xdp_do_generic_redirect(skb->dev, skb, 4900 &xdp, xdp_prog); 4901 if (err) 4902 goto out_redir; 4903 break; 4904 case XDP_TX: 4905 generic_xdp_tx(skb, xdp_prog); 4906 break; 4907 } 4908 return XDP_DROP; 4909 } 4910 } 4911 return XDP_PASS; 4912 out_redir: 4913 kfree_skb_reason(skb, SKB_DROP_REASON_XDP); 4914 return XDP_DROP; 4915 } 4916 EXPORT_SYMBOL_GPL(do_xdp_generic); 4917 4918 static int netif_rx_internal(struct sk_buff *skb) 4919 { 4920 int ret; 4921 4922 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 4923 4924 trace_netif_rx(skb); 4925 4926 #ifdef CONFIG_RPS 4927 if (static_branch_unlikely(&rps_needed)) { 4928 struct rps_dev_flow voidflow, *rflow = &voidflow; 4929 int cpu; 4930 4931 rcu_read_lock(); 4932 4933 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4934 if (cpu < 0) 4935 cpu = smp_processor_id(); 4936 4937 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4938 4939 rcu_read_unlock(); 4940 } else 4941 #endif 4942 { 4943 unsigned int qtail; 4944 4945 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 4946 } 4947 return ret; 4948 } 4949 4950 /** 4951 * __netif_rx - Slightly optimized version of netif_rx 4952 * @skb: buffer to post 4953 * 4954 * This behaves as netif_rx except that it does not disable bottom halves. 4955 * As a result this function may only be invoked from the interrupt context 4956 * (either hard or soft interrupt). 
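 *
 * A minimal usage sketch from a driver's hard interrupt handler
 * (my_irq_handler() and my_build_skb() are hypothetical helpers):
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct sk_buff *skb = my_build_skb(data);
 *
 *		if (skb)
 *			__netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}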
4957 */ 4958 int __netif_rx(struct sk_buff *skb) 4959 { 4960 int ret; 4961 4962 lockdep_assert_once(hardirq_count() | softirq_count()); 4963 4964 trace_netif_rx_entry(skb); 4965 ret = netif_rx_internal(skb); 4966 trace_netif_rx_exit(ret); 4967 return ret; 4968 } 4969 EXPORT_SYMBOL(__netif_rx); 4970 4971 /** 4972 * netif_rx - post buffer to the network code 4973 * @skb: buffer to post 4974 * 4975 * This function receives a packet from a device driver and queues it for 4976 * the upper (protocol) levels to process via the backlog NAPI device. It 4977 * always succeeds. The buffer may be dropped during processing for 4978 * congestion control or by the protocol layers. 4979 * The network buffer is passed via the backlog NAPI device. Modern NIC 4980 * drivers should use NAPI and GRO. 4981 * This function can be used from interrupt and from process context. The 4982 * caller from process context must not disable interrupts before invoking 4983 * this function. 4984 * 4985 * Return values: 4986 * NET_RX_SUCCESS (no congestion) 4987 * NET_RX_DROP (packet was dropped) 4988 * 4989 */ 4990 int netif_rx(struct sk_buff *skb) 4991 { 4992 bool need_bh_off = !(hardirq_count() | softirq_count()); 4993 int ret; 4994 4995 if (need_bh_off) 4996 local_bh_disable(); 4997 trace_netif_rx_entry(skb); 4998 ret = netif_rx_internal(skb); 4999 trace_netif_rx_exit(ret); 5000 if (need_bh_off) 5001 local_bh_enable(); 5002 return ret; 5003 } 5004 EXPORT_SYMBOL(netif_rx); 5005 5006 static __latent_entropy void net_tx_action(struct softirq_action *h) 5007 { 5008 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 5009 5010 if (sd->completion_queue) { 5011 struct sk_buff *clist; 5012 5013 local_irq_disable(); 5014 clist = sd->completion_queue; 5015 sd->completion_queue = NULL; 5016 local_irq_enable(); 5017 5018 while (clist) { 5019 struct sk_buff *skb = clist; 5020 5021 clist = clist->next; 5022 5023 WARN_ON(refcount_read(&skb->users)); 5024 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 5025 trace_consume_skb(skb, net_tx_action); 5026 else 5027 trace_kfree_skb(skb, net_tx_action, 5028 SKB_DROP_REASON_NOT_SPECIFIED); 5029 5030 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 5031 __kfree_skb(skb); 5032 else 5033 __kfree_skb_defer(skb); 5034 } 5035 } 5036 5037 if (sd->output_queue) { 5038 struct Qdisc *head; 5039 5040 local_irq_disable(); 5041 head = sd->output_queue; 5042 sd->output_queue = NULL; 5043 sd->output_queue_tailp = &sd->output_queue; 5044 local_irq_enable(); 5045 5046 rcu_read_lock(); 5047 5048 while (head) { 5049 struct Qdisc *q = head; 5050 spinlock_t *root_lock = NULL; 5051 5052 head = head->next_sched; 5053 5054 /* We need to make sure head->next_sched is read 5055 * before clearing __QDISC_STATE_SCHED 5056 */ 5057 smp_mb__before_atomic(); 5058 5059 if (!(q->flags & TCQ_F_NOLOCK)) { 5060 root_lock = qdisc_lock(q); 5061 spin_lock(root_lock); 5062 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, 5063 &q->state))) { 5064 /* There is a synchronize_net() between 5065 * STATE_DEACTIVATED flag being set and 5066 * qdisc_reset()/some_qdisc_is_busy() in 5067 * dev_deactivate(), so we can safely bail out 5068 * early here to avoid data race between 5069 * qdisc_deactivate() and some_qdisc_is_busy() 5070 * for lockless qdisc.
5071 */ 5072 clear_bit(__QDISC_STATE_SCHED, &q->state); 5073 continue; 5074 } 5075 5076 clear_bit(__QDISC_STATE_SCHED, &q->state); 5077 qdisc_run(q); 5078 if (root_lock) 5079 spin_unlock(root_lock); 5080 } 5081 5082 rcu_read_unlock(); 5083 } 5084 5085 xfrm_dev_backlog(sd); 5086 } 5087 5088 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 5089 /* This hook is defined here for ATM LANE */ 5090 int (*br_fdb_test_addr_hook)(struct net_device *dev, 5091 unsigned char *addr) __read_mostly; 5092 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 5093 #endif 5094 5095 static inline struct sk_buff * 5096 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 5097 struct net_device *orig_dev, bool *another) 5098 { 5099 #ifdef CONFIG_NET_CLS_ACT 5100 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 5101 struct tcf_result cl_res; 5102 5103 /* If there's at least one ingress present somewhere (so 5104 * we get here via enabled static key), remaining devices 5105 * that are not configured with an ingress qdisc will bail 5106 * out here. 5107 */ 5108 if (!miniq) 5109 return skb; 5110 5111 if (*pt_prev) { 5112 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5113 *pt_prev = NULL; 5114 } 5115 5116 qdisc_skb_cb(skb)->pkt_len = skb->len; 5117 tc_skb_cb(skb)->mru = 0; 5118 tc_skb_cb(skb)->post_ct = false; 5119 skb->tc_at_ingress = 1; 5120 mini_qdisc_bstats_cpu_update(miniq, skb); 5121 5122 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 5123 case TC_ACT_OK: 5124 case TC_ACT_RECLASSIFY: 5125 skb->tc_index = TC_H_MIN(cl_res.classid); 5126 break; 5127 case TC_ACT_SHOT: 5128 mini_qdisc_qstats_cpu_drop(miniq); 5129 kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS); 5130 *ret = NET_RX_DROP; 5131 return NULL; 5132 case TC_ACT_STOLEN: 5133 case TC_ACT_QUEUED: 5134 case TC_ACT_TRAP: 5135 consume_skb(skb); 5136 *ret = NET_RX_SUCCESS; 5137 return NULL; 5138 case TC_ACT_REDIRECT: 5139 /* skb_mac_header check was done by cls/act_bpf, so 5140 * we can safely push the L2 header back before 5141 * redirecting to another netdev 5142 */ 5143 __skb_push(skb, skb->mac_len); 5144 if (skb_do_redirect(skb) == -EAGAIN) { 5145 __skb_pull(skb, skb->mac_len); 5146 *another = true; 5147 break; 5148 } 5149 *ret = NET_RX_SUCCESS; 5150 return NULL; 5151 case TC_ACT_CONSUMED: 5152 *ret = NET_RX_SUCCESS; 5153 return NULL; 5154 default: 5155 break; 5156 } 5157 #endif /* CONFIG_NET_CLS_ACT */ 5158 return skb; 5159 } 5160 5161 /** 5162 * netdev_is_rx_handler_busy - check if receive handler is registered 5163 * @dev: device to check 5164 * 5165 * Check if a receive handler is already registered for a given device. 5166 * Return true if there is one. 5167 * 5168 * The caller must hold the rtnl_mutex. 5169 */ 5170 bool netdev_is_rx_handler_busy(struct net_device *dev) 5171 { 5172 ASSERT_RTNL(); 5173 return dev && rtnl_dereference(dev->rx_handler); 5174 } 5175 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5176 5177 /** 5178 * netdev_rx_handler_register - register receive handler 5179 * @dev: device to register a handler for 5180 * @rx_handler: receive handler to register 5181 * @rx_handler_data: data pointer that is used by rx handler 5182 * 5183 * Register a receive handler for a device. This handler will then be 5184 * called from __netif_receive_skb. A negative errno code is returned 5185 * on a failure. 5186 * 5187 * The caller must hold the rtnl_mutex. 5188 * 5189 * For a general description of rx_handler, see enum rx_handler_result.
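 *
 * A minimal registration sketch (slave_dev, my_handle_frame() and
 * my_port_priv are hypothetical; bridge and bonding ports are real users
 * of this API):
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(slave_dev, my_handle_frame,
 *					 my_port_priv);
 *	rtnl_unlock();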
5190 */ 5191 int netdev_rx_handler_register(struct net_device *dev, 5192 rx_handler_func_t *rx_handler, 5193 void *rx_handler_data) 5194 { 5195 if (netdev_is_rx_handler_busy(dev)) 5196 return -EBUSY; 5197 5198 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5199 return -EINVAL; 5200 5201 /* Note: rx_handler_data must be set before rx_handler */ 5202 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5203 rcu_assign_pointer(dev->rx_handler, rx_handler); 5204 5205 return 0; 5206 } 5207 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5208 5209 /** 5210 * netdev_rx_handler_unregister - unregister receive handler 5211 * @dev: device to unregister a handler from 5212 * 5213 * Unregister a receive handler from a device. 5214 * 5215 * The caller must hold the rtnl_mutex. 5216 */ 5217 void netdev_rx_handler_unregister(struct net_device *dev) 5218 { 5219 5220 ASSERT_RTNL(); 5221 RCU_INIT_POINTER(dev->rx_handler, NULL); 5222 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5223 * section has a guarantee to see a non NULL rx_handler_data 5224 * as well. 5225 */ 5226 synchronize_net(); 5227 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5228 } 5229 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5230 5231 /* 5232 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5233 * the special handling of PFMEMALLOC skbs. 5234 */ 5235 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5236 { 5237 switch (skb->protocol) { 5238 case htons(ETH_P_ARP): 5239 case htons(ETH_P_IP): 5240 case htons(ETH_P_IPV6): 5241 case htons(ETH_P_8021Q): 5242 case htons(ETH_P_8021AD): 5243 return true; 5244 default: 5245 return false; 5246 } 5247 } 5248 5249 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5250 int *ret, struct net_device *orig_dev) 5251 { 5252 if (nf_hook_ingress_active(skb)) { 5253 int ingress_retval; 5254 5255 if (*pt_prev) { 5256 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5257 *pt_prev = NULL; 5258 } 5259 5260 rcu_read_lock(); 5261 ingress_retval = nf_hook_ingress(skb); 5262 rcu_read_unlock(); 5263 return ingress_retval; 5264 } 5265 return 0; 5266 } 5267 5268 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5269 struct packet_type **ppt_prev) 5270 { 5271 struct packet_type *ptype, *pt_prev; 5272 rx_handler_func_t *rx_handler; 5273 struct sk_buff *skb = *pskb; 5274 struct net_device *orig_dev; 5275 bool deliver_exact = false; 5276 int ret = NET_RX_DROP; 5277 __be16 type; 5278 5279 net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb); 5280 5281 trace_netif_receive_skb(skb); 5282 5283 orig_dev = skb->dev; 5284 5285 skb_reset_network_header(skb); 5286 if (!skb_transport_header_was_set(skb)) 5287 skb_reset_transport_header(skb); 5288 skb_reset_mac_len(skb); 5289 5290 pt_prev = NULL; 5291 5292 another_round: 5293 skb->skb_iif = skb->dev->ifindex; 5294 5295 __this_cpu_inc(softnet_data.processed); 5296 5297 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5298 int ret2; 5299 5300 migrate_disable(); 5301 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5302 migrate_enable(); 5303 5304 if (ret2 != XDP_PASS) { 5305 ret = NET_RX_DROP; 5306 goto out; 5307 } 5308 } 5309 5310 if (eth_type_vlan(skb->protocol)) { 5311 skb = skb_vlan_untag(skb); 5312 if (unlikely(!skb)) 5313 goto out; 5314 } 5315 5316 if (skb_skip_tc_classify(skb)) 5317 goto skip_classify; 5318 5319 if (pfmemalloc) 5320 goto skip_taps; 5321 5322 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5323 if (pt_prev) 5324 ret = 
deliver_skb(skb, pt_prev, orig_dev); 5325 pt_prev = ptype; 5326 } 5327 5328 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5329 if (pt_prev) 5330 ret = deliver_skb(skb, pt_prev, orig_dev); 5331 pt_prev = ptype; 5332 } 5333 5334 skip_taps: 5335 #ifdef CONFIG_NET_INGRESS 5336 if (static_branch_unlikely(&ingress_needed_key)) { 5337 bool another = false; 5338 5339 nf_skip_egress(skb, true); 5340 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5341 &another); 5342 if (another) 5343 goto another_round; 5344 if (!skb) 5345 goto out; 5346 5347 nf_skip_egress(skb, false); 5348 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5349 goto out; 5350 } 5351 #endif 5352 skb_reset_redirect(skb); 5353 skip_classify: 5354 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5355 goto drop; 5356 5357 if (skb_vlan_tag_present(skb)) { 5358 if (pt_prev) { 5359 ret = deliver_skb(skb, pt_prev, orig_dev); 5360 pt_prev = NULL; 5361 } 5362 if (vlan_do_receive(&skb)) 5363 goto another_round; 5364 else if (unlikely(!skb)) 5365 goto out; 5366 } 5367 5368 rx_handler = rcu_dereference(skb->dev->rx_handler); 5369 if (rx_handler) { 5370 if (pt_prev) { 5371 ret = deliver_skb(skb, pt_prev, orig_dev); 5372 pt_prev = NULL; 5373 } 5374 switch (rx_handler(&skb)) { 5375 case RX_HANDLER_CONSUMED: 5376 ret = NET_RX_SUCCESS; 5377 goto out; 5378 case RX_HANDLER_ANOTHER: 5379 goto another_round; 5380 case RX_HANDLER_EXACT: 5381 deliver_exact = true; 5382 break; 5383 case RX_HANDLER_PASS: 5384 break; 5385 default: 5386 BUG(); 5387 } 5388 } 5389 5390 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5391 check_vlan_id: 5392 if (skb_vlan_tag_get_id(skb)) { 5393 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5394 * find vlan device. 5395 */ 5396 skb->pkt_type = PACKET_OTHERHOST; 5397 } else if (eth_type_vlan(skb->protocol)) { 5398 /* Outer header is 802.1P with vlan 0, inner header is 5399 * 802.1Q or 802.1AD and vlan_do_receive() above could 5400 * not find vlan dev for vlan id 0. 5401 */ 5402 __vlan_hwaccel_clear_tag(skb); 5403 skb = skb_vlan_untag(skb); 5404 if (unlikely(!skb)) 5405 goto out; 5406 if (vlan_do_receive(&skb)) 5407 /* After stripping off 802.1P header with vlan 0 5408 * vlan dev is found for inner header. 5409 */ 5410 goto another_round; 5411 else if (unlikely(!skb)) 5412 goto out; 5413 else 5414 /* We have stripped outer 802.1P vlan 0 header. 5415 * But could not find vlan dev. 5416 * check again for vlan id to set OTHERHOST. 
5417 */ 5418 goto check_vlan_id; 5419 } 5420 /* Note: we might in the future use prio bits 5421 * and set skb->priority like in vlan_do_receive() 5422 * For the time being, just ignore Priority Code Point 5423 */ 5424 __vlan_hwaccel_clear_tag(skb); 5425 } 5426 5427 type = skb->protocol; 5428 5429 /* deliver only exact match when indicated */ 5430 if (likely(!deliver_exact)) { 5431 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5432 &ptype_base[ntohs(type) & 5433 PTYPE_HASH_MASK]); 5434 } 5435 5436 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5437 &orig_dev->ptype_specific); 5438 5439 if (unlikely(skb->dev != orig_dev)) { 5440 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5441 &skb->dev->ptype_specific); 5442 } 5443 5444 if (pt_prev) { 5445 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5446 goto drop; 5447 *ppt_prev = pt_prev; 5448 } else { 5449 drop: 5450 if (!deliver_exact) 5451 dev_core_stats_rx_dropped_inc(skb->dev); 5452 else 5453 dev_core_stats_rx_nohandler_inc(skb->dev); 5454 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); 5455 /* Jamal, now you will not able to escape explaining 5456 * me how you were going to use this. :-) 5457 */ 5458 ret = NET_RX_DROP; 5459 } 5460 5461 out: 5462 /* The invariant here is that if *ppt_prev is not NULL 5463 * then skb should also be non-NULL. 5464 * 5465 * Apparently *ppt_prev assignment above holds this invariant due to 5466 * skb dereferencing near it. 5467 */ 5468 *pskb = skb; 5469 return ret; 5470 } 5471 5472 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5473 { 5474 struct net_device *orig_dev = skb->dev; 5475 struct packet_type *pt_prev = NULL; 5476 int ret; 5477 5478 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5479 if (pt_prev) 5480 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5481 skb->dev, pt_prev, orig_dev); 5482 return ret; 5483 } 5484 5485 /** 5486 * netif_receive_skb_core - special purpose version of netif_receive_skb 5487 * @skb: buffer to process 5488 * 5489 * More direct receive version of netif_receive_skb(). It should 5490 * only be used by callers that have a need to skip RPS and Generic XDP. 5491 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5492 * 5493 * This function may only be called from softirq context and interrupts 5494 * should be enabled. 5495 * 5496 * Return values (usually ignored): 5497 * NET_RX_SUCCESS: no congestion 5498 * NET_RX_DROP: packet was dropped 5499 */ 5500 int netif_receive_skb_core(struct sk_buff *skb) 5501 { 5502 int ret; 5503 5504 rcu_read_lock(); 5505 ret = __netif_receive_skb_one_core(skb, false); 5506 rcu_read_unlock(); 5507 5508 return ret; 5509 } 5510 EXPORT_SYMBOL(netif_receive_skb_core); 5511 5512 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5513 struct packet_type *pt_prev, 5514 struct net_device *orig_dev) 5515 { 5516 struct sk_buff *skb, *next; 5517 5518 if (!pt_prev) 5519 return; 5520 if (list_empty(head)) 5521 return; 5522 if (pt_prev->list_func != NULL) 5523 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5524 ip_list_rcv, head, pt_prev, orig_dev); 5525 else 5526 list_for_each_entry_safe(skb, next, head, list) { 5527 skb_list_del_init(skb); 5528 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5529 } 5530 } 5531 5532 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5533 { 5534 /* Fast-path assumptions: 5535 * - There is no RX handler. 5536 * - Only one packet_type matches. 
5537 * If either of these fails, we will end up doing some per-packet 5538 * processing in-line, then handling the 'last ptype' for the whole 5539 * sublist. This can't cause out-of-order delivery to any single ptype, 5540 * because the 'last ptype' must be constant across the sublist, and all 5541 * other ptypes are handled per-packet. 5542 */ 5543 /* Current (common) ptype of sublist */ 5544 struct packet_type *pt_curr = NULL; 5545 /* Current (common) orig_dev of sublist */ 5546 struct net_device *od_curr = NULL; 5547 struct list_head sublist; 5548 struct sk_buff *skb, *next; 5549 5550 INIT_LIST_HEAD(&sublist); 5551 list_for_each_entry_safe(skb, next, head, list) { 5552 struct net_device *orig_dev = skb->dev; 5553 struct packet_type *pt_prev = NULL; 5554 5555 skb_list_del_init(skb); 5556 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5557 if (!pt_prev) 5558 continue; 5559 if (pt_curr != pt_prev || od_curr != orig_dev) { 5560 /* dispatch old sublist */ 5561 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5562 /* start new sublist */ 5563 INIT_LIST_HEAD(&sublist); 5564 pt_curr = pt_prev; 5565 od_curr = orig_dev; 5566 } 5567 list_add_tail(&skb->list, &sublist); 5568 } 5569 5570 /* dispatch final sublist */ 5571 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5572 } 5573 5574 static int __netif_receive_skb(struct sk_buff *skb) 5575 { 5576 int ret; 5577 5578 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5579 unsigned int noreclaim_flag; 5580 5581 /* 5582 * PFMEMALLOC skbs are special, they should 5583 * - be delivered to SOCK_MEMALLOC sockets only 5584 * - stay away from userspace 5585 * - have bounded memory usage 5586 * 5587 * Use PF_MEMALLOC as this saves us from propagating the allocation 5588 * context down to all allocation sites. 5589 */ 5590 noreclaim_flag = memalloc_noreclaim_save(); 5591 ret = __netif_receive_skb_one_core(skb, true); 5592 memalloc_noreclaim_restore(noreclaim_flag); 5593 } else 5594 ret = __netif_receive_skb_one_core(skb, false); 5595 5596 return ret; 5597 } 5598 5599 static void __netif_receive_skb_list(struct list_head *head) 5600 { 5601 unsigned long noreclaim_flag = 0; 5602 struct sk_buff *skb, *next; 5603 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5604 5605 list_for_each_entry_safe(skb, next, head, list) { 5606 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5607 struct list_head sublist; 5608 5609 /* Handle the previous sublist */ 5610 list_cut_before(&sublist, head, &skb->list); 5611 if (!list_empty(&sublist)) 5612 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5613 pfmemalloc = !pfmemalloc; 5614 /* See comments in __netif_receive_skb */ 5615 if (pfmemalloc) 5616 noreclaim_flag = memalloc_noreclaim_save(); 5617 else 5618 memalloc_noreclaim_restore(noreclaim_flag); 5619 } 5620 } 5621 /* Handle the remaining sublist */ 5622 if (!list_empty(head)) 5623 __netif_receive_skb_list_core(head, pfmemalloc); 5624 /* Restore pflags */ 5625 if (pfmemalloc) 5626 memalloc_noreclaim_restore(noreclaim_flag); 5627 } 5628 5629 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5630 { 5631 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5632 struct bpf_prog *new = xdp->prog; 5633 int ret = 0; 5634 5635 switch (xdp->command) { 5636 case XDP_SETUP_PROG: 5637 rcu_assign_pointer(dev->xdp_prog, new); 5638 if (old) 5639 bpf_prog_put(old); 5640 5641 if (old && !new) { 5642 static_branch_dec(&generic_xdp_needed_key); 5643 } else if (new && !old) { 5644 static_branch_inc(&generic_xdp_needed_key); 5645 dev_disable_lro(dev); 5646 dev_disable_gro_hw(dev); 5647 } 5648 break; 5649 5650 default: 5651 ret = -EINVAL; 5652 break; 5653 } 5654 5655 return ret; 5656 } 5657 5658 static int netif_receive_skb_internal(struct sk_buff *skb) 5659 { 5660 int ret; 5661 5662 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5663 5664 if (skb_defer_rx_timestamp(skb)) 5665 return NET_RX_SUCCESS; 5666 5667 rcu_read_lock(); 5668 #ifdef CONFIG_RPS 5669 if (static_branch_unlikely(&rps_needed)) { 5670 struct rps_dev_flow voidflow, *rflow = &voidflow; 5671 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5672 5673 if (cpu >= 0) { 5674 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5675 rcu_read_unlock(); 5676 return ret; 5677 } 5678 } 5679 #endif 5680 ret = __netif_receive_skb(skb); 5681 rcu_read_unlock(); 5682 return ret; 5683 } 5684 5685 void netif_receive_skb_list_internal(struct list_head *head) 5686 { 5687 struct sk_buff *skb, *next; 5688 struct list_head sublist; 5689 5690 INIT_LIST_HEAD(&sublist); 5691 list_for_each_entry_safe(skb, next, head, list) { 5692 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); 5693 skb_list_del_init(skb); 5694 if (!skb_defer_rx_timestamp(skb)) 5695 list_add_tail(&skb->list, &sublist); 5696 } 5697 list_splice_init(&sublist, head); 5698 5699 rcu_read_lock(); 5700 #ifdef CONFIG_RPS 5701 if (static_branch_unlikely(&rps_needed)) { 5702 list_for_each_entry_safe(skb, next, head, list) { 5703 struct rps_dev_flow voidflow, *rflow = &voidflow; 5704 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5705 5706 if (cpu >= 0) { 5707 /* Will be handled, remove from list */ 5708 skb_list_del_init(skb); 5709 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5710 } 5711 } 5712 } 5713 #endif 5714 __netif_receive_skb_list(head); 5715 rcu_read_unlock(); 5716 } 5717 5718 /** 5719 * netif_receive_skb - process receive buffer from network 5720 * @skb: buffer to process 5721 * 5722 * netif_receive_skb() is the main receive data processing function. 5723 * It always succeeds. The buffer may be dropped during processing 5724 * for congestion control or by the protocol layers. 5725 * 5726 * This function may only be called from softirq context and interrupts 5727 * should be enabled. 
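 * A NAPI driver typically calls this (or napi_gro_receive()) from its
 * poll routine for each completed receive descriptor.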
5728 * 5729 * Return values (usually ignored): 5730 * NET_RX_SUCCESS: no congestion 5731 * NET_RX_DROP: packet was dropped 5732 */ 5733 int netif_receive_skb(struct sk_buff *skb) 5734 { 5735 int ret; 5736 5737 trace_netif_receive_skb_entry(skb); 5738 5739 ret = netif_receive_skb_internal(skb); 5740 trace_netif_receive_skb_exit(ret); 5741 5742 return ret; 5743 } 5744 EXPORT_SYMBOL(netif_receive_skb); 5745 5746 /** 5747 * netif_receive_skb_list - process many receive buffers from network 5748 * @head: list of skbs to process. 5749 * 5750 * Since return value of netif_receive_skb() is normally ignored, and 5751 * wouldn't be meaningful for a list, this function returns void. 5752 * 5753 * This function may only be called from softirq context and interrupts 5754 * should be enabled. 5755 */ 5756 void netif_receive_skb_list(struct list_head *head) 5757 { 5758 struct sk_buff *skb; 5759 5760 if (list_empty(head)) 5761 return; 5762 if (trace_netif_receive_skb_list_entry_enabled()) { 5763 list_for_each_entry(skb, head, list) 5764 trace_netif_receive_skb_list_entry(skb); 5765 } 5766 netif_receive_skb_list_internal(head); 5767 trace_netif_receive_skb_list_exit(0); 5768 } 5769 EXPORT_SYMBOL(netif_receive_skb_list); 5770 5771 static DEFINE_PER_CPU(struct work_struct, flush_works); 5772 5773 /* Network device is going away, flush any packets still pending */ 5774 static void flush_backlog(struct work_struct *work) 5775 { 5776 struct sk_buff *skb, *tmp; 5777 struct softnet_data *sd; 5778 5779 local_bh_disable(); 5780 sd = this_cpu_ptr(&softnet_data); 5781 5782 rps_lock_irq_disable(sd); 5783 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5784 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5785 __skb_unlink(skb, &sd->input_pkt_queue); 5786 dev_kfree_skb_irq(skb); 5787 input_queue_head_incr(sd); 5788 } 5789 } 5790 rps_unlock_irq_enable(sd); 5791 5792 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5793 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5794 __skb_unlink(skb, &sd->process_queue); 5795 kfree_skb(skb); 5796 input_queue_head_incr(sd); 5797 } 5798 } 5799 local_bh_enable(); 5800 } 5801 5802 static bool flush_required(int cpu) 5803 { 5804 #if IS_ENABLED(CONFIG_RPS) 5805 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5806 bool do_flush; 5807 5808 rps_lock_irq_disable(sd); 5809 5810 /* as insertion into process_queue happens with the rps lock held, 5811 * process_queue access may race only with dequeue 5812 */ 5813 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5814 !skb_queue_empty_lockless(&sd->process_queue); 5815 rps_unlock_irq_enable(sd); 5816 5817 return do_flush; 5818 #endif 5819 /* without RPS we can't safely check input_pkt_queue: during a 5820 * concurrent remote skb_queue_splice() we can detect as empty both 5821 * input_pkt_queue and process_queue even if the latter could end-up 5822 * containing a lot of packets. 
5823 */ 5824 return true; 5825 } 5826 5827 static void flush_all_backlogs(void) 5828 { 5829 static cpumask_t flush_cpus; 5830 unsigned int cpu; 5831 5832 /* since we are under rtnl lock protection we can use static data 5833 * for the cpumask and avoid allocating on stack the possibly 5834 * large mask 5835 */ 5836 ASSERT_RTNL(); 5837 5838 cpus_read_lock(); 5839 5840 cpumask_clear(&flush_cpus); 5841 for_each_online_cpu(cpu) { 5842 if (flush_required(cpu)) { 5843 queue_work_on(cpu, system_highpri_wq, 5844 per_cpu_ptr(&flush_works, cpu)); 5845 cpumask_set_cpu(cpu, &flush_cpus); 5846 } 5847 } 5848 5849 /* we can have in flight packet[s] on the cpus we are not flushing, 5850 * synchronize_net() in unregister_netdevice_many() will take care of 5851 * them 5852 */ 5853 for_each_cpu(cpu, &flush_cpus) 5854 flush_work(per_cpu_ptr(&flush_works, cpu)); 5855 5856 cpus_read_unlock(); 5857 } 5858 5859 static void net_rps_send_ipi(struct softnet_data *remsd) 5860 { 5861 #ifdef CONFIG_RPS 5862 while (remsd) { 5863 struct softnet_data *next = remsd->rps_ipi_next; 5864 5865 if (cpu_online(remsd->cpu)) 5866 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5867 remsd = next; 5868 } 5869 #endif 5870 } 5871 5872 /* 5873 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5874 * Note: called with local irq disabled, but exits with local irq enabled. 5875 */ 5876 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5877 { 5878 #ifdef CONFIG_RPS 5879 struct softnet_data *remsd = sd->rps_ipi_list; 5880 5881 if (remsd) { 5882 sd->rps_ipi_list = NULL; 5883 5884 local_irq_enable(); 5885 5886 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5887 net_rps_send_ipi(remsd); 5888 } else 5889 #endif 5890 local_irq_enable(); 5891 } 5892 5893 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5894 { 5895 #ifdef CONFIG_RPS 5896 return sd->rps_ipi_list != NULL; 5897 #else 5898 return false; 5899 #endif 5900 } 5901 5902 static int process_backlog(struct napi_struct *napi, int quota) 5903 { 5904 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5905 bool again = true; 5906 int work = 0; 5907 5908 /* Check if we have pending ipi, its better to send them now, 5909 * not waiting net_rx_action() end. 5910 */ 5911 if (sd_has_rps_ipi_waiting(sd)) { 5912 local_irq_disable(); 5913 net_rps_action_and_irq_enable(sd); 5914 } 5915 5916 napi->weight = READ_ONCE(dev_rx_weight); 5917 while (again) { 5918 struct sk_buff *skb; 5919 5920 while ((skb = __skb_dequeue(&sd->process_queue))) { 5921 rcu_read_lock(); 5922 __netif_receive_skb(skb); 5923 rcu_read_unlock(); 5924 input_queue_head_incr(sd); 5925 if (++work >= quota) 5926 return work; 5927 5928 } 5929 5930 rps_lock_irq_disable(sd); 5931 if (skb_queue_empty(&sd->input_pkt_queue)) { 5932 /* 5933 * Inline a custom version of __napi_complete(). 5934 * only current cpu owns and manipulates this napi, 5935 * and NAPI_STATE_SCHED is the only possible flag set 5936 * on backlog. 5937 * We can use a plain write instead of clear_bit(), 5938 * and we dont need an smp_mb() memory barrier. 5939 */ 5940 napi->state = 0; 5941 again = false; 5942 } else { 5943 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5944 &sd->process_queue); 5945 } 5946 rps_unlock_irq_enable(sd); 5947 } 5948 5949 return work; 5950 } 5951 5952 /** 5953 * __napi_schedule - schedule for receive 5954 * @n: entry to schedule 5955 * 5956 * The entry's receive function will be scheduled to run. 
5957 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 5958 */ 5959 void __napi_schedule(struct napi_struct *n) 5960 { 5961 unsigned long flags; 5962 5963 local_irq_save(flags); 5964 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5965 local_irq_restore(flags); 5966 } 5967 EXPORT_SYMBOL(__napi_schedule); 5968 5969 /** 5970 * napi_schedule_prep - check if napi can be scheduled 5971 * @n: napi context 5972 * 5973 * Test if NAPI routine is already running, and if not mark 5974 * it as running. This is used as a condition variable to 5975 * insure only one NAPI poll instance runs. We also make 5976 * sure there is no pending NAPI disable. 5977 */ 5978 bool napi_schedule_prep(struct napi_struct *n) 5979 { 5980 unsigned long new, val = READ_ONCE(n->state); 5981 5982 do { 5983 if (unlikely(val & NAPIF_STATE_DISABLE)) 5984 return false; 5985 new = val | NAPIF_STATE_SCHED; 5986 5987 /* Sets STATE_MISSED bit if STATE_SCHED was already set 5988 * This was suggested by Alexander Duyck, as compiler 5989 * emits better code than : 5990 * if (val & NAPIF_STATE_SCHED) 5991 * new |= NAPIF_STATE_MISSED; 5992 */ 5993 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 5994 NAPIF_STATE_MISSED; 5995 } while (!try_cmpxchg(&n->state, &val, new)); 5996 5997 return !(val & NAPIF_STATE_SCHED); 5998 } 5999 EXPORT_SYMBOL(napi_schedule_prep); 6000 6001 /** 6002 * __napi_schedule_irqoff - schedule for receive 6003 * @n: entry to schedule 6004 * 6005 * Variant of __napi_schedule() assuming hard irqs are masked. 6006 * 6007 * On PREEMPT_RT enabled kernels this maps to __napi_schedule() 6008 * because the interrupt disabled assumption might not be true 6009 * due to force-threaded interrupts and spinlock substitution. 6010 */ 6011 void __napi_schedule_irqoff(struct napi_struct *n) 6012 { 6013 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6014 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6015 else 6016 __napi_schedule(n); 6017 } 6018 EXPORT_SYMBOL(__napi_schedule_irqoff); 6019 6020 bool napi_complete_done(struct napi_struct *n, int work_done) 6021 { 6022 unsigned long flags, val, new, timeout = 0; 6023 bool ret = true; 6024 6025 /* 6026 * 1) Don't let napi dequeue from the cpu poll list 6027 * just in case its running on a different cpu. 6028 * 2) If we are busy polling, do nothing here, we have 6029 * the guarantee we will be called later. 
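 *
 * The return value tells the caller whether it owns the instance again:
 * true means the poll really completed and the driver may re-enable its
 * device interrupts, false means the NAPI is still scheduled (busy
 * polling, a gro_flush_timeout deferral, or a missed schedule) and
 * interrupts must stay masked. Illustrative driver pattern, with a
 * hypothetical IRQ-unmasking helper:
 *
 *	if (work_done < budget &&
 *	    napi_complete_done(napi, work_done))
 *		mydrv_enable_rx_irq(priv);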
6030 */ 6031 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6032 NAPIF_STATE_IN_BUSY_POLL))) 6033 return false; 6034 6035 if (work_done) { 6036 if (n->gro_bitmask) 6037 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6038 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 6039 } 6040 if (n->defer_hard_irqs_count > 0) { 6041 n->defer_hard_irqs_count--; 6042 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6043 if (timeout) 6044 ret = false; 6045 } 6046 if (n->gro_bitmask) { 6047 /* When the NAPI instance uses a timeout and keeps postponing 6048 * it, we need to bound somehow the time packets are kept in 6049 * the GRO layer 6050 */ 6051 napi_gro_flush(n, !!timeout); 6052 } 6053 6054 gro_normal_list(n); 6055 6056 if (unlikely(!list_empty(&n->poll_list))) { 6057 /* If n->poll_list is not empty, we need to mask irqs */ 6058 local_irq_save(flags); 6059 list_del_init(&n->poll_list); 6060 local_irq_restore(flags); 6061 } 6062 6063 val = READ_ONCE(n->state); 6064 do { 6065 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6066 6067 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6068 NAPIF_STATE_SCHED_THREADED | 6069 NAPIF_STATE_PREFER_BUSY_POLL); 6070 6071 /* If STATE_MISSED was set, leave STATE_SCHED set, 6072 * because we will call napi->poll() one more time. 6073 * This C code was suggested by Alexander Duyck to help gcc. 6074 */ 6075 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6076 NAPIF_STATE_SCHED; 6077 } while (!try_cmpxchg(&n->state, &val, new)); 6078 6079 if (unlikely(val & NAPIF_STATE_MISSED)) { 6080 __napi_schedule(n); 6081 return false; 6082 } 6083 6084 if (timeout) 6085 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6086 HRTIMER_MODE_REL_PINNED); 6087 return ret; 6088 } 6089 EXPORT_SYMBOL(napi_complete_done); 6090 6091 /* must be called under rcu_read_lock(), as we dont take a reference */ 6092 static struct napi_struct *napi_by_id(unsigned int napi_id) 6093 { 6094 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6095 struct napi_struct *napi; 6096 6097 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6098 if (napi->napi_id == napi_id) 6099 return napi; 6100 6101 return NULL; 6102 } 6103 6104 #if defined(CONFIG_NET_RX_BUSY_POLL) 6105 6106 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6107 { 6108 if (!skip_schedule) { 6109 gro_normal_list(napi); 6110 __napi_schedule(napi); 6111 return; 6112 } 6113 6114 if (napi->gro_bitmask) { 6115 /* flush too old packets 6116 * If HZ < 1000, flush all packets. 6117 */ 6118 napi_gro_flush(napi, HZ >= 1000); 6119 } 6120 6121 gro_normal_list(napi); 6122 clear_bit(NAPI_STATE_SCHED, &napi->state); 6123 } 6124 6125 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6126 u16 budget) 6127 { 6128 bool skip_schedule = false; 6129 unsigned long timeout; 6130 int rc; 6131 6132 /* Busy polling means there is a high chance device driver hard irq 6133 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6134 * set in napi_schedule_prep(). 6135 * Since we are about to call napi->poll() once more, we can safely 6136 * clear NAPI_STATE_MISSED. 6137 * 6138 * Note: x86 could use a single "lock and ..." 
instruction 6139 * to perform these two clear_bit() 6140 */ 6141 clear_bit(NAPI_STATE_MISSED, &napi->state); 6142 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6143 6144 local_bh_disable(); 6145 6146 if (prefer_busy_poll) { 6147 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6148 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6149 if (napi->defer_hard_irqs_count && timeout) { 6150 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6151 skip_schedule = true; 6152 } 6153 } 6154 6155 /* All we really want here is to re-enable device interrupts. 6156 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6157 */ 6158 rc = napi->poll(napi, budget); 6159 /* We can't gro_normal_list() here, because napi->poll() might have 6160 * rearmed the napi (napi_complete_done()) in which case it could 6161 * already be running on another CPU. 6162 */ 6163 trace_napi_poll(napi, rc, budget); 6164 netpoll_poll_unlock(have_poll_lock); 6165 if (rc == budget) 6166 __busy_poll_stop(napi, skip_schedule); 6167 local_bh_enable(); 6168 } 6169 6170 void napi_busy_loop(unsigned int napi_id, 6171 bool (*loop_end)(void *, unsigned long), 6172 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6173 { 6174 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6175 int (*napi_poll)(struct napi_struct *napi, int budget); 6176 void *have_poll_lock = NULL; 6177 struct napi_struct *napi; 6178 6179 restart: 6180 napi_poll = NULL; 6181 6182 rcu_read_lock(); 6183 6184 napi = napi_by_id(napi_id); 6185 if (!napi) 6186 goto out; 6187 6188 preempt_disable(); 6189 for (;;) { 6190 int work = 0; 6191 6192 local_bh_disable(); 6193 if (!napi_poll) { 6194 unsigned long val = READ_ONCE(napi->state); 6195 6196 /* If multiple threads are competing for this napi, 6197 * we avoid dirtying napi->state as much as we can. 
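		 * If DISABLE, SCHED or IN_BUSY_POLL is already set, another
		 * context (a softirq poll, napi_disable() or another busy
		 * poller) owns the instance; we then only optionally set
		 * PREFER_BUSY_POLL and jump to the accounting label rather
		 * than trying to grab SCHED ourselves.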
6198 */ 6199 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6200 NAPIF_STATE_IN_BUSY_POLL)) { 6201 if (prefer_busy_poll) 6202 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6203 goto count; 6204 } 6205 if (cmpxchg(&napi->state, val, 6206 val | NAPIF_STATE_IN_BUSY_POLL | 6207 NAPIF_STATE_SCHED) != val) { 6208 if (prefer_busy_poll) 6209 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6210 goto count; 6211 } 6212 have_poll_lock = netpoll_poll_lock(napi); 6213 napi_poll = napi->poll; 6214 } 6215 work = napi_poll(napi, budget); 6216 trace_napi_poll(napi, work, budget); 6217 gro_normal_list(napi); 6218 count: 6219 if (work > 0) 6220 __NET_ADD_STATS(dev_net(napi->dev), 6221 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6222 local_bh_enable(); 6223 6224 if (!loop_end || loop_end(loop_end_arg, start_time)) 6225 break; 6226 6227 if (unlikely(need_resched())) { 6228 if (napi_poll) 6229 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6230 preempt_enable(); 6231 rcu_read_unlock(); 6232 cond_resched(); 6233 if (loop_end(loop_end_arg, start_time)) 6234 return; 6235 goto restart; 6236 } 6237 cpu_relax(); 6238 } 6239 if (napi_poll) 6240 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6241 preempt_enable(); 6242 out: 6243 rcu_read_unlock(); 6244 } 6245 EXPORT_SYMBOL(napi_busy_loop); 6246 6247 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6248 6249 static void napi_hash_add(struct napi_struct *napi) 6250 { 6251 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6252 return; 6253 6254 spin_lock(&napi_hash_lock); 6255 6256 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6257 do { 6258 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6259 napi_gen_id = MIN_NAPI_ID; 6260 } while (napi_by_id(napi_gen_id)); 6261 napi->napi_id = napi_gen_id; 6262 6263 hlist_add_head_rcu(&napi->napi_hash_node, 6264 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6265 6266 spin_unlock(&napi_hash_lock); 6267 } 6268 6269 /* Warning : caller is responsible to make sure rcu grace period 6270 * is respected before freeing memory containing @napi 6271 */ 6272 static void napi_hash_del(struct napi_struct *napi) 6273 { 6274 spin_lock(&napi_hash_lock); 6275 6276 hlist_del_init_rcu(&napi->napi_hash_node); 6277 6278 spin_unlock(&napi_hash_lock); 6279 } 6280 6281 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6282 { 6283 struct napi_struct *napi; 6284 6285 napi = container_of(timer, struct napi_struct, timer); 6286 6287 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6288 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
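	 * The timer is armed from napi_complete_done() (and busy_poll_stop())
	 * when the device has a non-zero gro_flush_timeout; if SCHED is
	 * already set when it fires, some other context is polling this NAPI
	 * anyway and will flush the pending GRO packets, so there is nothing
	 * left to do.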
6289 */ 6290 if (!napi_disable_pending(napi) && 6291 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6292 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6293 __napi_schedule_irqoff(napi); 6294 } 6295 6296 return HRTIMER_NORESTART; 6297 } 6298 6299 static void init_gro_hash(struct napi_struct *napi) 6300 { 6301 int i; 6302 6303 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6304 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6305 napi->gro_hash[i].count = 0; 6306 } 6307 napi->gro_bitmask = 0; 6308 } 6309 6310 int dev_set_threaded(struct net_device *dev, bool threaded) 6311 { 6312 struct napi_struct *napi; 6313 int err = 0; 6314 6315 if (dev->threaded == threaded) 6316 return 0; 6317 6318 if (threaded) { 6319 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6320 if (!napi->thread) { 6321 err = napi_kthread_create(napi); 6322 if (err) { 6323 threaded = false; 6324 break; 6325 } 6326 } 6327 } 6328 } 6329 6330 dev->threaded = threaded; 6331 6332 /* Make sure kthread is created before THREADED bit 6333 * is set. 6334 */ 6335 smp_mb__before_atomic(); 6336 6337 /* Setting/unsetting threaded mode on a napi might not immediately 6338 * take effect, if the current napi instance is actively being 6339 * polled. In this case, the switch between threaded mode and 6340 * softirq mode will happen in the next round of napi_schedule(). 6341 * This should not cause hiccups/stalls to the live traffic. 6342 */ 6343 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6344 if (threaded) 6345 set_bit(NAPI_STATE_THREADED, &napi->state); 6346 else 6347 clear_bit(NAPI_STATE_THREADED, &napi->state); 6348 } 6349 6350 return err; 6351 } 6352 EXPORT_SYMBOL(dev_set_threaded); 6353 6354 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, 6355 int (*poll)(struct napi_struct *, int), int weight) 6356 { 6357 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6358 return; 6359 6360 INIT_LIST_HEAD(&napi->poll_list); 6361 INIT_HLIST_NODE(&napi->napi_hash_node); 6362 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6363 napi->timer.function = napi_watchdog; 6364 init_gro_hash(napi); 6365 napi->skb = NULL; 6366 INIT_LIST_HEAD(&napi->rx_list); 6367 napi->rx_count = 0; 6368 napi->poll = poll; 6369 if (weight > NAPI_POLL_WEIGHT) 6370 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6371 weight); 6372 napi->weight = weight; 6373 napi->dev = dev; 6374 #ifdef CONFIG_NETPOLL 6375 napi->poll_owner = -1; 6376 #endif 6377 set_bit(NAPI_STATE_SCHED, &napi->state); 6378 set_bit(NAPI_STATE_NPSVC, &napi->state); 6379 list_add_rcu(&napi->dev_list, &dev->napi_list); 6380 napi_hash_add(napi); 6381 napi_get_frags_check(napi); 6382 /* Create kthread for this napi if dev->threaded is set. 6383 * Clear dev->threaded if kthread creation failed so that 6384 * threaded mode will not be enabled in napi_enable(). 
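	 * dev->threaded is normally switched beforehand via dev_set_threaded()
	 * above (for example when userspace writes 1 to
	 * /sys/class/net/<iface>/threaded), so NAPIs registered afterwards
	 * pick the mode up here.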
6385 */ 6386 if (dev->threaded && napi_kthread_create(napi)) 6387 dev->threaded = 0; 6388 } 6389 EXPORT_SYMBOL(netif_napi_add_weight); 6390 6391 void napi_disable(struct napi_struct *n) 6392 { 6393 unsigned long val, new; 6394 6395 might_sleep(); 6396 set_bit(NAPI_STATE_DISABLE, &n->state); 6397 6398 val = READ_ONCE(n->state); 6399 do { 6400 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 6401 usleep_range(20, 200); 6402 val = READ_ONCE(n->state); 6403 } 6404 6405 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 6406 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 6407 } while (!try_cmpxchg(&n->state, &val, new)); 6408 6409 hrtimer_cancel(&n->timer); 6410 6411 clear_bit(NAPI_STATE_DISABLE, &n->state); 6412 } 6413 EXPORT_SYMBOL(napi_disable); 6414 6415 /** 6416 * napi_enable - enable NAPI scheduling 6417 * @n: NAPI context 6418 * 6419 * Resume NAPI from being scheduled on this context. 6420 * Must be paired with napi_disable. 6421 */ 6422 void napi_enable(struct napi_struct *n) 6423 { 6424 unsigned long new, val = READ_ONCE(n->state); 6425 6426 do { 6427 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6428 6429 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6430 if (n->dev->threaded && n->thread) 6431 new |= NAPIF_STATE_THREADED; 6432 } while (!try_cmpxchg(&n->state, &val, new)); 6433 } 6434 EXPORT_SYMBOL(napi_enable); 6435 6436 static void flush_gro_hash(struct napi_struct *napi) 6437 { 6438 int i; 6439 6440 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6441 struct sk_buff *skb, *n; 6442 6443 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6444 kfree_skb(skb); 6445 napi->gro_hash[i].count = 0; 6446 } 6447 } 6448 6449 /* Must be called in process context */ 6450 void __netif_napi_del(struct napi_struct *napi) 6451 { 6452 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6453 return; 6454 6455 napi_hash_del(napi); 6456 list_del_rcu(&napi->dev_list); 6457 napi_free_frags(napi); 6458 6459 flush_gro_hash(napi); 6460 napi->gro_bitmask = 0; 6461 6462 if (napi->thread) { 6463 kthread_stop(napi->thread); 6464 napi->thread = NULL; 6465 } 6466 } 6467 EXPORT_SYMBOL(__netif_napi_del); 6468 6469 static int __napi_poll(struct napi_struct *n, bool *repoll) 6470 { 6471 int work, weight; 6472 6473 weight = n->weight; 6474 6475 /* This NAPI_STATE_SCHED test is for avoiding a race 6476 * with netpoll's poll_napi(). Only the entity which 6477 * obtains the lock and sees NAPI_STATE_SCHED set will 6478 * actually make the ->poll() call. Therefore we avoid 6479 * accidentally calling ->poll() when NAPI is not scheduled. 6480 */ 6481 work = 0; 6482 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6483 work = n->poll(n, weight); 6484 trace_napi_poll(n, work, weight); 6485 } 6486 6487 if (unlikely(work > weight)) 6488 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6489 n->poll, work, weight); 6490 6491 if (likely(work < weight)) 6492 return work; 6493 6494 /* Drivers must not modify the NAPI state if they 6495 * consume the entire weight. In such cases this code 6496 * still "owns" the NAPI instance and therefore can 6497 * move the instance around on the list at-will. 6498 */ 6499 if (unlikely(napi_disable_pending(n))) { 6500 napi_complete(n); 6501 return work; 6502 } 6503 6504 /* The NAPI context has more processing work, but busy-polling 6505 * is preferred. Exit early. 
6506 */ 6507 if (napi_prefer_busy_poll(n)) { 6508 if (napi_complete_done(n, work)) { 6509 /* If timeout is not set, we need to make sure 6510 * that the NAPI is re-scheduled. 6511 */ 6512 napi_schedule(n); 6513 } 6514 return work; 6515 } 6516 6517 if (n->gro_bitmask) { 6518 /* flush too old packets 6519 * If HZ < 1000, flush all packets. 6520 */ 6521 napi_gro_flush(n, HZ >= 1000); 6522 } 6523 6524 gro_normal_list(n); 6525 6526 /* Some drivers may have called napi_schedule 6527 * prior to exhausting their budget. 6528 */ 6529 if (unlikely(!list_empty(&n->poll_list))) { 6530 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6531 n->dev ? n->dev->name : "backlog"); 6532 return work; 6533 } 6534 6535 *repoll = true; 6536 6537 return work; 6538 } 6539 6540 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6541 { 6542 bool do_repoll = false; 6543 void *have; 6544 int work; 6545 6546 list_del_init(&n->poll_list); 6547 6548 have = netpoll_poll_lock(n); 6549 6550 work = __napi_poll(n, &do_repoll); 6551 6552 if (do_repoll) 6553 list_add_tail(&n->poll_list, repoll); 6554 6555 netpoll_poll_unlock(have); 6556 6557 return work; 6558 } 6559 6560 static int napi_thread_wait(struct napi_struct *napi) 6561 { 6562 bool woken = false; 6563 6564 set_current_state(TASK_INTERRUPTIBLE); 6565 6566 while (!kthread_should_stop()) { 6567 /* Testing SCHED_THREADED bit here to make sure the current 6568 * kthread owns this napi and could poll on this napi. 6569 * Testing SCHED bit is not enough because SCHED bit might be 6570 * set by some other busy poll thread or by napi_disable(). 6571 */ 6572 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 6573 WARN_ON(!list_empty(&napi->poll_list)); 6574 __set_current_state(TASK_RUNNING); 6575 return 0; 6576 } 6577 6578 schedule(); 6579 /* woken being true indicates this thread owns this napi. 
*/ 6580 woken = true; 6581 set_current_state(TASK_INTERRUPTIBLE); 6582 } 6583 __set_current_state(TASK_RUNNING); 6584 6585 return -1; 6586 } 6587 6588 static int napi_threaded_poll(void *data) 6589 { 6590 struct napi_struct *napi = data; 6591 void *have; 6592 6593 while (!napi_thread_wait(napi)) { 6594 for (;;) { 6595 bool repoll = false; 6596 6597 local_bh_disable(); 6598 6599 have = netpoll_poll_lock(napi); 6600 __napi_poll(napi, &repoll); 6601 netpoll_poll_unlock(have); 6602 6603 local_bh_enable(); 6604 6605 if (!repoll) 6606 break; 6607 6608 cond_resched(); 6609 } 6610 } 6611 return 0; 6612 } 6613 6614 static void skb_defer_free_flush(struct softnet_data *sd) 6615 { 6616 struct sk_buff *skb, *next; 6617 6618 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ 6619 if (!READ_ONCE(sd->defer_list)) 6620 return; 6621 6622 spin_lock_irq(&sd->defer_lock); 6623 skb = sd->defer_list; 6624 sd->defer_list = NULL; 6625 sd->defer_count = 0; 6626 spin_unlock_irq(&sd->defer_lock); 6627 6628 while (skb != NULL) { 6629 next = skb->next; 6630 napi_consume_skb(skb, 1); 6631 skb = next; 6632 } 6633 } 6634 6635 static __latent_entropy void net_rx_action(struct softirq_action *h) 6636 { 6637 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6638 unsigned long time_limit = jiffies + 6639 usecs_to_jiffies(READ_ONCE(netdev_budget_usecs)); 6640 int budget = READ_ONCE(netdev_budget); 6641 LIST_HEAD(list); 6642 LIST_HEAD(repoll); 6643 6644 local_irq_disable(); 6645 list_splice_init(&sd->poll_list, &list); 6646 local_irq_enable(); 6647 6648 for (;;) { 6649 struct napi_struct *n; 6650 6651 skb_defer_free_flush(sd); 6652 6653 if (list_empty(&list)) { 6654 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 6655 goto end; 6656 break; 6657 } 6658 6659 n = list_first_entry(&list, struct napi_struct, poll_list); 6660 budget -= napi_poll(n, &repoll); 6661 6662 /* If softirq window is exhausted then punt. 6663 * Allow this to run for 2 jiffies since which will allow 6664 * an average latency of 1.5/HZ. 
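		 * Both limits are tunable: the packet budget through the
		 * net.core.netdev_budget sysctl and the time budget through
		 * net.core.netdev_budget_usecs. Hitting either limit
		 * increments sd->time_squeeze (visible in
		 * /proc/net/softnet_stat) and leaves the remaining work for a
		 * later softirq round.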
6665 */ 6666 if (unlikely(budget <= 0 || 6667 time_after_eq(jiffies, time_limit))) { 6668 sd->time_squeeze++; 6669 break; 6670 } 6671 } 6672 6673 local_irq_disable(); 6674 6675 list_splice_tail_init(&sd->poll_list, &list); 6676 list_splice_tail(&repoll, &list); 6677 list_splice(&list, &sd->poll_list); 6678 if (!list_empty(&sd->poll_list)) 6679 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6680 6681 net_rps_action_and_irq_enable(sd); 6682 end:; 6683 } 6684 6685 struct netdev_adjacent { 6686 struct net_device *dev; 6687 netdevice_tracker dev_tracker; 6688 6689 /* upper master flag, there can only be one master device per list */ 6690 bool master; 6691 6692 /* lookup ignore flag */ 6693 bool ignore; 6694 6695 /* counter for the number of times this device was added to us */ 6696 u16 ref_nr; 6697 6698 /* private field for the users */ 6699 void *private; 6700 6701 struct list_head list; 6702 struct rcu_head rcu; 6703 }; 6704 6705 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6706 struct list_head *adj_list) 6707 { 6708 struct netdev_adjacent *adj; 6709 6710 list_for_each_entry(adj, adj_list, list) { 6711 if (adj->dev == adj_dev) 6712 return adj; 6713 } 6714 return NULL; 6715 } 6716 6717 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 6718 struct netdev_nested_priv *priv) 6719 { 6720 struct net_device *dev = (struct net_device *)priv->data; 6721 6722 return upper_dev == dev; 6723 } 6724 6725 /** 6726 * netdev_has_upper_dev - Check if device is linked to an upper device 6727 * @dev: device 6728 * @upper_dev: upper device to check 6729 * 6730 * Find out if a device is linked to specified upper device and return true 6731 * in case it is. Note that this checks only immediate upper device, 6732 * not through a complete stack of devices. The caller must hold the RTNL lock. 6733 */ 6734 bool netdev_has_upper_dev(struct net_device *dev, 6735 struct net_device *upper_dev) 6736 { 6737 struct netdev_nested_priv priv = { 6738 .data = (void *)upper_dev, 6739 }; 6740 6741 ASSERT_RTNL(); 6742 6743 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6744 &priv); 6745 } 6746 EXPORT_SYMBOL(netdev_has_upper_dev); 6747 6748 /** 6749 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 6750 * @dev: device 6751 * @upper_dev: upper device to check 6752 * 6753 * Find out if a device is linked to specified upper device and return true 6754 * in case it is. Note that this checks the entire upper device chain. 6755 * The caller must hold rcu lock. 6756 */ 6757 6758 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6759 struct net_device *upper_dev) 6760 { 6761 struct netdev_nested_priv priv = { 6762 .data = (void *)upper_dev, 6763 }; 6764 6765 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6766 &priv); 6767 } 6768 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6769 6770 /** 6771 * netdev_has_any_upper_dev - Check if device is linked to some device 6772 * @dev: device 6773 * 6774 * Find out if a device is linked to an upper device and return true in case 6775 * it is. The caller must hold the RTNL lock. 6776 */ 6777 bool netdev_has_any_upper_dev(struct net_device *dev) 6778 { 6779 ASSERT_RTNL(); 6780 6781 return !list_empty(&dev->adj_list.upper); 6782 } 6783 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6784 6785 /** 6786 * netdev_master_upper_dev_get - Get master upper device 6787 * @dev: device 6788 * 6789 * Find a master upper device and return pointer to it or NULL in case 6790 * it's not there. 
The caller must hold the RTNL lock. 6791 */ 6792 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6793 { 6794 struct netdev_adjacent *upper; 6795 6796 ASSERT_RTNL(); 6797 6798 if (list_empty(&dev->adj_list.upper)) 6799 return NULL; 6800 6801 upper = list_first_entry(&dev->adj_list.upper, 6802 struct netdev_adjacent, list); 6803 if (likely(upper->master)) 6804 return upper->dev; 6805 return NULL; 6806 } 6807 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6808 6809 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 6810 { 6811 struct netdev_adjacent *upper; 6812 6813 ASSERT_RTNL(); 6814 6815 if (list_empty(&dev->adj_list.upper)) 6816 return NULL; 6817 6818 upper = list_first_entry(&dev->adj_list.upper, 6819 struct netdev_adjacent, list); 6820 if (likely(upper->master) && !upper->ignore) 6821 return upper->dev; 6822 return NULL; 6823 } 6824 6825 /** 6826 * netdev_has_any_lower_dev - Check if device is linked to some device 6827 * @dev: device 6828 * 6829 * Find out if a device is linked to a lower device and return true in case 6830 * it is. The caller must hold the RTNL lock. 6831 */ 6832 static bool netdev_has_any_lower_dev(struct net_device *dev) 6833 { 6834 ASSERT_RTNL(); 6835 6836 return !list_empty(&dev->adj_list.lower); 6837 } 6838 6839 void *netdev_adjacent_get_private(struct list_head *adj_list) 6840 { 6841 struct netdev_adjacent *adj; 6842 6843 adj = list_entry(adj_list, struct netdev_adjacent, list); 6844 6845 return adj->private; 6846 } 6847 EXPORT_SYMBOL(netdev_adjacent_get_private); 6848 6849 /** 6850 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6851 * @dev: device 6852 * @iter: list_head ** of the current position 6853 * 6854 * Gets the next device from the dev's upper list, starting from iter 6855 * position. The caller must hold RCU read lock. 
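 *
 * Callers normally go through the netdev_for_each_upper_dev_rcu()
 * wrapper macro; a sketch of that usage (the callback name is made up):
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		handle_one_upper(upper);
 *	rcu_read_unlock();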
6856 */ 6857 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6858 struct list_head **iter) 6859 { 6860 struct netdev_adjacent *upper; 6861 6862 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6863 6864 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6865 6866 if (&upper->list == &dev->adj_list.upper) 6867 return NULL; 6868 6869 *iter = &upper->list; 6870 6871 return upper->dev; 6872 } 6873 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6874 6875 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 6876 struct list_head **iter, 6877 bool *ignore) 6878 { 6879 struct netdev_adjacent *upper; 6880 6881 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 6882 6883 if (&upper->list == &dev->adj_list.upper) 6884 return NULL; 6885 6886 *iter = &upper->list; 6887 *ignore = upper->ignore; 6888 6889 return upper->dev; 6890 } 6891 6892 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6893 struct list_head **iter) 6894 { 6895 struct netdev_adjacent *upper; 6896 6897 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6898 6899 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6900 6901 if (&upper->list == &dev->adj_list.upper) 6902 return NULL; 6903 6904 *iter = &upper->list; 6905 6906 return upper->dev; 6907 } 6908 6909 static int __netdev_walk_all_upper_dev(struct net_device *dev, 6910 int (*fn)(struct net_device *dev, 6911 struct netdev_nested_priv *priv), 6912 struct netdev_nested_priv *priv) 6913 { 6914 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6915 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6916 int ret, cur = 0; 6917 bool ignore; 6918 6919 now = dev; 6920 iter = &dev->adj_list.upper; 6921 6922 while (1) { 6923 if (now != dev) { 6924 ret = fn(now, priv); 6925 if (ret) 6926 return ret; 6927 } 6928 6929 next = NULL; 6930 while (1) { 6931 udev = __netdev_next_upper_dev(now, &iter, &ignore); 6932 if (!udev) 6933 break; 6934 if (ignore) 6935 continue; 6936 6937 next = udev; 6938 niter = &udev->adj_list.upper; 6939 dev_stack[cur] = now; 6940 iter_stack[cur++] = iter; 6941 break; 6942 } 6943 6944 if (!next) { 6945 if (!cur) 6946 return 0; 6947 next = dev_stack[--cur]; 6948 niter = iter_stack[cur]; 6949 } 6950 6951 now = next; 6952 iter = niter; 6953 } 6954 6955 return 0; 6956 } 6957 6958 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6959 int (*fn)(struct net_device *dev, 6960 struct netdev_nested_priv *priv), 6961 struct netdev_nested_priv *priv) 6962 { 6963 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6964 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6965 int ret, cur = 0; 6966 6967 now = dev; 6968 iter = &dev->adj_list.upper; 6969 6970 while (1) { 6971 if (now != dev) { 6972 ret = fn(now, priv); 6973 if (ret) 6974 return ret; 6975 } 6976 6977 next = NULL; 6978 while (1) { 6979 udev = netdev_next_upper_dev_rcu(now, &iter); 6980 if (!udev) 6981 break; 6982 6983 next = udev; 6984 niter = &udev->adj_list.upper; 6985 dev_stack[cur] = now; 6986 iter_stack[cur++] = iter; 6987 break; 6988 } 6989 6990 if (!next) { 6991 if (!cur) 6992 return 0; 6993 next = dev_stack[--cur]; 6994 niter = iter_stack[cur]; 6995 } 6996 6997 now = next; 6998 iter = niter; 6999 } 7000 7001 return 0; 7002 } 7003 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7004 7005 static bool __netdev_has_upper_dev(struct net_device *dev, 7006 struct net_device *upper_dev) 7007 { 7008 struct 
netdev_nested_priv priv = { 7009 .flags = 0, 7010 .data = (void *)upper_dev, 7011 }; 7012 7013 ASSERT_RTNL(); 7014 7015 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 7016 &priv); 7017 } 7018 7019 /** 7020 * netdev_lower_get_next_private - Get the next ->private from the 7021 * lower neighbour list 7022 * @dev: device 7023 * @iter: list_head ** of the current position 7024 * 7025 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7026 * list, starting from iter position. The caller must hold either hold the 7027 * RTNL lock or its own locking that guarantees that the neighbour lower 7028 * list will remain unchanged. 7029 */ 7030 void *netdev_lower_get_next_private(struct net_device *dev, 7031 struct list_head **iter) 7032 { 7033 struct netdev_adjacent *lower; 7034 7035 lower = list_entry(*iter, struct netdev_adjacent, list); 7036 7037 if (&lower->list == &dev->adj_list.lower) 7038 return NULL; 7039 7040 *iter = lower->list.next; 7041 7042 return lower->private; 7043 } 7044 EXPORT_SYMBOL(netdev_lower_get_next_private); 7045 7046 /** 7047 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7048 * lower neighbour list, RCU 7049 * variant 7050 * @dev: device 7051 * @iter: list_head ** of the current position 7052 * 7053 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7054 * list, starting from iter position. The caller must hold RCU read lock. 7055 */ 7056 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7057 struct list_head **iter) 7058 { 7059 struct netdev_adjacent *lower; 7060 7061 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 7062 7063 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7064 7065 if (&lower->list == &dev->adj_list.lower) 7066 return NULL; 7067 7068 *iter = &lower->list; 7069 7070 return lower->private; 7071 } 7072 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7073 7074 /** 7075 * netdev_lower_get_next - Get the next device from the lower neighbour 7076 * list 7077 * @dev: device 7078 * @iter: list_head ** of the current position 7079 * 7080 * Gets the next netdev_adjacent from the dev's lower neighbour 7081 * list, starting from iter position. The caller must hold RTNL lock or 7082 * its own locking that guarantees that the neighbour lower 7083 * list will remain unchanged. 
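 *
 * This is the iterator behind the netdev_for_each_lower_dev() helper
 * macro, which is how callers normally walk the lower list.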
7084 */ 7085 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7086 { 7087 struct netdev_adjacent *lower; 7088 7089 lower = list_entry(*iter, struct netdev_adjacent, list); 7090 7091 if (&lower->list == &dev->adj_list.lower) 7092 return NULL; 7093 7094 *iter = lower->list.next; 7095 7096 return lower->dev; 7097 } 7098 EXPORT_SYMBOL(netdev_lower_get_next); 7099 7100 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7101 struct list_head **iter) 7102 { 7103 struct netdev_adjacent *lower; 7104 7105 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7106 7107 if (&lower->list == &dev->adj_list.lower) 7108 return NULL; 7109 7110 *iter = &lower->list; 7111 7112 return lower->dev; 7113 } 7114 7115 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7116 struct list_head **iter, 7117 bool *ignore) 7118 { 7119 struct netdev_adjacent *lower; 7120 7121 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7122 7123 if (&lower->list == &dev->adj_list.lower) 7124 return NULL; 7125 7126 *iter = &lower->list; 7127 *ignore = lower->ignore; 7128 7129 return lower->dev; 7130 } 7131 7132 int netdev_walk_all_lower_dev(struct net_device *dev, 7133 int (*fn)(struct net_device *dev, 7134 struct netdev_nested_priv *priv), 7135 struct netdev_nested_priv *priv) 7136 { 7137 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7138 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7139 int ret, cur = 0; 7140 7141 now = dev; 7142 iter = &dev->adj_list.lower; 7143 7144 while (1) { 7145 if (now != dev) { 7146 ret = fn(now, priv); 7147 if (ret) 7148 return ret; 7149 } 7150 7151 next = NULL; 7152 while (1) { 7153 ldev = netdev_next_lower_dev(now, &iter); 7154 if (!ldev) 7155 break; 7156 7157 next = ldev; 7158 niter = &ldev->adj_list.lower; 7159 dev_stack[cur] = now; 7160 iter_stack[cur++] = iter; 7161 break; 7162 } 7163 7164 if (!next) { 7165 if (!cur) 7166 return 0; 7167 next = dev_stack[--cur]; 7168 niter = iter_stack[cur]; 7169 } 7170 7171 now = next; 7172 iter = niter; 7173 } 7174 7175 return 0; 7176 } 7177 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7178 7179 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7180 int (*fn)(struct net_device *dev, 7181 struct netdev_nested_priv *priv), 7182 struct netdev_nested_priv *priv) 7183 { 7184 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7185 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7186 int ret, cur = 0; 7187 bool ignore; 7188 7189 now = dev; 7190 iter = &dev->adj_list.lower; 7191 7192 while (1) { 7193 if (now != dev) { 7194 ret = fn(now, priv); 7195 if (ret) 7196 return ret; 7197 } 7198 7199 next = NULL; 7200 while (1) { 7201 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7202 if (!ldev) 7203 break; 7204 if (ignore) 7205 continue; 7206 7207 next = ldev; 7208 niter = &ldev->adj_list.lower; 7209 dev_stack[cur] = now; 7210 iter_stack[cur++] = iter; 7211 break; 7212 } 7213 7214 if (!next) { 7215 if (!cur) 7216 return 0; 7217 next = dev_stack[--cur]; 7218 niter = iter_stack[cur]; 7219 } 7220 7221 now = next; 7222 iter = niter; 7223 } 7224 7225 return 0; 7226 } 7227 7228 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7229 struct list_head **iter) 7230 { 7231 struct netdev_adjacent *lower; 7232 7233 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7234 if (&lower->list == &dev->adj_list.lower) 7235 return NULL; 7236 7237 *iter = &lower->list; 7238 7239 
return lower->dev; 7240 } 7241 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7242 7243 static u8 __netdev_upper_depth(struct net_device *dev) 7244 { 7245 struct net_device *udev; 7246 struct list_head *iter; 7247 u8 max_depth = 0; 7248 bool ignore; 7249 7250 for (iter = &dev->adj_list.upper, 7251 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7252 udev; 7253 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7254 if (ignore) 7255 continue; 7256 if (max_depth < udev->upper_level) 7257 max_depth = udev->upper_level; 7258 } 7259 7260 return max_depth; 7261 } 7262 7263 static u8 __netdev_lower_depth(struct net_device *dev) 7264 { 7265 struct net_device *ldev; 7266 struct list_head *iter; 7267 u8 max_depth = 0; 7268 bool ignore; 7269 7270 for (iter = &dev->adj_list.lower, 7271 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7272 ldev; 7273 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7274 if (ignore) 7275 continue; 7276 if (max_depth < ldev->lower_level) 7277 max_depth = ldev->lower_level; 7278 } 7279 7280 return max_depth; 7281 } 7282 7283 static int __netdev_update_upper_level(struct net_device *dev, 7284 struct netdev_nested_priv *__unused) 7285 { 7286 dev->upper_level = __netdev_upper_depth(dev) + 1; 7287 return 0; 7288 } 7289 7290 #ifdef CONFIG_LOCKDEP 7291 static LIST_HEAD(net_unlink_list); 7292 7293 static void net_unlink_todo(struct net_device *dev) 7294 { 7295 if (list_empty(&dev->unlink_list)) 7296 list_add_tail(&dev->unlink_list, &net_unlink_list); 7297 } 7298 #endif 7299 7300 static int __netdev_update_lower_level(struct net_device *dev, 7301 struct netdev_nested_priv *priv) 7302 { 7303 dev->lower_level = __netdev_lower_depth(dev) + 1; 7304 7305 #ifdef CONFIG_LOCKDEP 7306 if (!priv) 7307 return 0; 7308 7309 if (priv->flags & NESTED_SYNC_IMM) 7310 dev->nested_level = dev->lower_level - 1; 7311 if (priv->flags & NESTED_SYNC_TODO) 7312 net_unlink_todo(dev); 7313 #endif 7314 return 0; 7315 } 7316 7317 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7318 int (*fn)(struct net_device *dev, 7319 struct netdev_nested_priv *priv), 7320 struct netdev_nested_priv *priv) 7321 { 7322 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7323 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7324 int ret, cur = 0; 7325 7326 now = dev; 7327 iter = &dev->adj_list.lower; 7328 7329 while (1) { 7330 if (now != dev) { 7331 ret = fn(now, priv); 7332 if (ret) 7333 return ret; 7334 } 7335 7336 next = NULL; 7337 while (1) { 7338 ldev = netdev_next_lower_dev_rcu(now, &iter); 7339 if (!ldev) 7340 break; 7341 7342 next = ldev; 7343 niter = &ldev->adj_list.lower; 7344 dev_stack[cur] = now; 7345 iter_stack[cur++] = iter; 7346 break; 7347 } 7348 7349 if (!next) { 7350 if (!cur) 7351 return 0; 7352 next = dev_stack[--cur]; 7353 niter = iter_stack[cur]; 7354 } 7355 7356 now = next; 7357 iter = niter; 7358 } 7359 7360 return 0; 7361 } 7362 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7363 7364 /** 7365 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7366 * lower neighbour list, RCU 7367 * variant 7368 * @dev: device 7369 * 7370 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7371 * list. The caller must hold RCU read lock. 
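 *
 * Bonding, for instance, stores its struct slave as the adjacency
 * private data and uses this helper (via bond_first_slave_rcu()) to
 * reach the first slave from RCU context.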
7372 */ 7373 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7374 { 7375 struct netdev_adjacent *lower; 7376 7377 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7378 struct netdev_adjacent, list); 7379 if (lower) 7380 return lower->private; 7381 return NULL; 7382 } 7383 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7384 7385 /** 7386 * netdev_master_upper_dev_get_rcu - Get master upper device 7387 * @dev: device 7388 * 7389 * Find a master upper device and return pointer to it or NULL in case 7390 * it's not there. The caller must hold the RCU read lock. 7391 */ 7392 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7393 { 7394 struct netdev_adjacent *upper; 7395 7396 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7397 struct netdev_adjacent, list); 7398 if (upper && likely(upper->master)) 7399 return upper->dev; 7400 return NULL; 7401 } 7402 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7403 7404 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7405 struct net_device *adj_dev, 7406 struct list_head *dev_list) 7407 { 7408 char linkname[IFNAMSIZ+7]; 7409 7410 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7411 "upper_%s" : "lower_%s", adj_dev->name); 7412 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7413 linkname); 7414 } 7415 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7416 char *name, 7417 struct list_head *dev_list) 7418 { 7419 char linkname[IFNAMSIZ+7]; 7420 7421 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7422 "upper_%s" : "lower_%s", name); 7423 sysfs_remove_link(&(dev->dev.kobj), linkname); 7424 } 7425 7426 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7427 struct net_device *adj_dev, 7428 struct list_head *dev_list) 7429 { 7430 return (dev_list == &dev->adj_list.upper || 7431 dev_list == &dev->adj_list.lower) && 7432 net_eq(dev_net(dev), dev_net(adj_dev)); 7433 } 7434 7435 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7436 struct net_device *adj_dev, 7437 struct list_head *dev_list, 7438 void *private, bool master) 7439 { 7440 struct netdev_adjacent *adj; 7441 int ret; 7442 7443 adj = __netdev_find_adj(adj_dev, dev_list); 7444 7445 if (adj) { 7446 adj->ref_nr += 1; 7447 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7448 dev->name, adj_dev->name, adj->ref_nr); 7449 7450 return 0; 7451 } 7452 7453 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7454 if (!adj) 7455 return -ENOMEM; 7456 7457 adj->dev = adj_dev; 7458 adj->master = master; 7459 adj->ref_nr = 1; 7460 adj->private = private; 7461 adj->ignore = false; 7462 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL); 7463 7464 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7465 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7466 7467 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7468 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7469 if (ret) 7470 goto free_adj; 7471 } 7472 7473 /* Ensure that master link is always the first item in list. 
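	 * netdev_master_upper_dev_get() and netdev_master_upper_dev_get_rcu()
	 * depend on this ordering: they only inspect the first entry of
	 * adj_list.upper and test its ->master flag, so keeping the master at
	 * the head makes that lookup O(1).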
*/ 7474 if (master) { 7475 ret = sysfs_create_link(&(dev->dev.kobj), 7476 &(adj_dev->dev.kobj), "master"); 7477 if (ret) 7478 goto remove_symlinks; 7479 7480 list_add_rcu(&adj->list, dev_list); 7481 } else { 7482 list_add_tail_rcu(&adj->list, dev_list); 7483 } 7484 7485 return 0; 7486 7487 remove_symlinks: 7488 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7489 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7490 free_adj: 7491 netdev_put(adj_dev, &adj->dev_tracker); 7492 kfree(adj); 7493 7494 return ret; 7495 } 7496 7497 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7498 struct net_device *adj_dev, 7499 u16 ref_nr, 7500 struct list_head *dev_list) 7501 { 7502 struct netdev_adjacent *adj; 7503 7504 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7505 dev->name, adj_dev->name, ref_nr); 7506 7507 adj = __netdev_find_adj(adj_dev, dev_list); 7508 7509 if (!adj) { 7510 pr_err("Adjacency does not exist for device %s from %s\n", 7511 dev->name, adj_dev->name); 7512 WARN_ON(1); 7513 return; 7514 } 7515 7516 if (adj->ref_nr > ref_nr) { 7517 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7518 dev->name, adj_dev->name, ref_nr, 7519 adj->ref_nr - ref_nr); 7520 adj->ref_nr -= ref_nr; 7521 return; 7522 } 7523 7524 if (adj->master) 7525 sysfs_remove_link(&(dev->dev.kobj), "master"); 7526 7527 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7528 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7529 7530 list_del_rcu(&adj->list); 7531 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7532 adj_dev->name, dev->name, adj_dev->name); 7533 netdev_put(adj_dev, &adj->dev_tracker); 7534 kfree_rcu(adj, rcu); 7535 } 7536 7537 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7538 struct net_device *upper_dev, 7539 struct list_head *up_list, 7540 struct list_head *down_list, 7541 void *private, bool master) 7542 { 7543 int ret; 7544 7545 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7546 private, master); 7547 if (ret) 7548 return ret; 7549 7550 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7551 private, false); 7552 if (ret) { 7553 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7554 return ret; 7555 } 7556 7557 return 0; 7558 } 7559 7560 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7561 struct net_device *upper_dev, 7562 u16 ref_nr, 7563 struct list_head *up_list, 7564 struct list_head *down_list) 7565 { 7566 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7567 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7568 } 7569 7570 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7571 struct net_device *upper_dev, 7572 void *private, bool master) 7573 { 7574 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7575 &dev->adj_list.upper, 7576 &upper_dev->adj_list.lower, 7577 private, master); 7578 } 7579 7580 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7581 struct net_device *upper_dev) 7582 { 7583 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7584 &dev->adj_list.upper, 7585 &upper_dev->adj_list.lower); 7586 } 7587 7588 static int __netdev_upper_dev_link(struct net_device *dev, 7589 struct net_device *upper_dev, bool master, 7590 void *upper_priv, void *upper_info, 7591 struct netdev_nested_priv *priv, 7592 struct netlink_ext_ack *extack) 7593 { 7594 struct netdev_notifier_changeupper_info changeupper_info = { 7595 .info = { 7596 .dev = dev, 
7597 .extack = extack, 7598 }, 7599 .upper_dev = upper_dev, 7600 .master = master, 7601 .linking = true, 7602 .upper_info = upper_info, 7603 }; 7604 struct net_device *master_dev; 7605 int ret = 0; 7606 7607 ASSERT_RTNL(); 7608 7609 if (dev == upper_dev) 7610 return -EBUSY; 7611 7612 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7613 if (__netdev_has_upper_dev(upper_dev, dev)) 7614 return -EBUSY; 7615 7616 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7617 return -EMLINK; 7618 7619 if (!master) { 7620 if (__netdev_has_upper_dev(dev, upper_dev)) 7621 return -EEXIST; 7622 } else { 7623 master_dev = __netdev_master_upper_dev_get(dev); 7624 if (master_dev) 7625 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7626 } 7627 7628 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7629 &changeupper_info.info); 7630 ret = notifier_to_errno(ret); 7631 if (ret) 7632 return ret; 7633 7634 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7635 master); 7636 if (ret) 7637 return ret; 7638 7639 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7640 &changeupper_info.info); 7641 ret = notifier_to_errno(ret); 7642 if (ret) 7643 goto rollback; 7644 7645 __netdev_update_upper_level(dev, NULL); 7646 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7647 7648 __netdev_update_lower_level(upper_dev, priv); 7649 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7650 priv); 7651 7652 return 0; 7653 7654 rollback: 7655 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7656 7657 return ret; 7658 } 7659 7660 /** 7661 * netdev_upper_dev_link - Add a link to the upper device 7662 * @dev: device 7663 * @upper_dev: new upper device 7664 * @extack: netlink extended ack 7665 * 7666 * Adds a link to device which is upper to this one. The caller must hold 7667 * the RTNL lock. On a failure a negative errno code is returned. 7668 * On success the reference counts are adjusted and the function 7669 * returns zero. 7670 */ 7671 int netdev_upper_dev_link(struct net_device *dev, 7672 struct net_device *upper_dev, 7673 struct netlink_ext_ack *extack) 7674 { 7675 struct netdev_nested_priv priv = { 7676 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7677 .data = NULL, 7678 }; 7679 7680 return __netdev_upper_dev_link(dev, upper_dev, false, 7681 NULL, NULL, &priv, extack); 7682 } 7683 EXPORT_SYMBOL(netdev_upper_dev_link); 7684 7685 /** 7686 * netdev_master_upper_dev_link - Add a master link to the upper device 7687 * @dev: device 7688 * @upper_dev: new upper device 7689 * @upper_priv: upper device private 7690 * @upper_info: upper info to be passed down via notifier 7691 * @extack: netlink extended ack 7692 * 7693 * Adds a link to device which is upper to this one. In this case, only 7694 * one master upper device can be linked, although other non-master devices 7695 * might be linked as well. The caller must hold the RTNL lock. 7696 * On a failure a negative errno code is returned. On success the reference 7697 * counts are adjusted and the function returns zero. 
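 *
 * Illustrative call as a bonding/team style driver might make when
 * enslaving a port (the private and info structure names here are
 * placeholders, not defined by this API):
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev,
 *					   slave_priv, &lag_upper_info,
 *					   extack);
 *	if (err)
 *		goto err_undo_enslave;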
7698 */ 7699 int netdev_master_upper_dev_link(struct net_device *dev, 7700 struct net_device *upper_dev, 7701 void *upper_priv, void *upper_info, 7702 struct netlink_ext_ack *extack) 7703 { 7704 struct netdev_nested_priv priv = { 7705 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7706 .data = NULL, 7707 }; 7708 7709 return __netdev_upper_dev_link(dev, upper_dev, true, 7710 upper_priv, upper_info, &priv, extack); 7711 } 7712 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7713 7714 static void __netdev_upper_dev_unlink(struct net_device *dev, 7715 struct net_device *upper_dev, 7716 struct netdev_nested_priv *priv) 7717 { 7718 struct netdev_notifier_changeupper_info changeupper_info = { 7719 .info = { 7720 .dev = dev, 7721 }, 7722 .upper_dev = upper_dev, 7723 .linking = false, 7724 }; 7725 7726 ASSERT_RTNL(); 7727 7728 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7729 7730 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7731 &changeupper_info.info); 7732 7733 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7734 7735 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7736 &changeupper_info.info); 7737 7738 __netdev_update_upper_level(dev, NULL); 7739 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7740 7741 __netdev_update_lower_level(upper_dev, priv); 7742 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7743 priv); 7744 } 7745 7746 /** 7747 * netdev_upper_dev_unlink - Removes a link to upper device 7748 * @dev: device 7749 * @upper_dev: new upper device 7750 * 7751 * Removes a link to device which is upper to this one. The caller must hold 7752 * the RTNL lock. 7753 */ 7754 void netdev_upper_dev_unlink(struct net_device *dev, 7755 struct net_device *upper_dev) 7756 { 7757 struct netdev_nested_priv priv = { 7758 .flags = NESTED_SYNC_TODO, 7759 .data = NULL, 7760 }; 7761 7762 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 7763 } 7764 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7765 7766 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 7767 struct net_device *lower_dev, 7768 bool val) 7769 { 7770 struct netdev_adjacent *adj; 7771 7772 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 7773 if (adj) 7774 adj->ignore = val; 7775 7776 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 7777 if (adj) 7778 adj->ignore = val; 7779 } 7780 7781 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 7782 struct net_device *lower_dev) 7783 { 7784 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 7785 } 7786 7787 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 7788 struct net_device *lower_dev) 7789 { 7790 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 7791 } 7792 7793 int netdev_adjacent_change_prepare(struct net_device *old_dev, 7794 struct net_device *new_dev, 7795 struct net_device *dev, 7796 struct netlink_ext_ack *extack) 7797 { 7798 struct netdev_nested_priv priv = { 7799 .flags = 0, 7800 .data = NULL, 7801 }; 7802 int err; 7803 7804 if (!new_dev) 7805 return 0; 7806 7807 if (old_dev && new_dev != old_dev) 7808 netdev_adjacent_dev_disable(dev, old_dev); 7809 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 7810 extack); 7811 if (err) { 7812 if (old_dev && new_dev != old_dev) 7813 netdev_adjacent_dev_enable(dev, old_dev); 7814 return err; 7815 } 7816 7817 return 0; 7818 } 7819 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 7820 7821 void netdev_adjacent_change_commit(struct net_device *old_dev, 7822 struct 
net_device *new_dev, 7823 struct net_device *dev) 7824 { 7825 struct netdev_nested_priv priv = { 7826 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7827 .data = NULL, 7828 }; 7829 7830 if (!new_dev || !old_dev) 7831 return; 7832 7833 if (new_dev == old_dev) 7834 return; 7835 7836 netdev_adjacent_dev_enable(dev, old_dev); 7837 __netdev_upper_dev_unlink(old_dev, dev, &priv); 7838 } 7839 EXPORT_SYMBOL(netdev_adjacent_change_commit); 7840 7841 void netdev_adjacent_change_abort(struct net_device *old_dev, 7842 struct net_device *new_dev, 7843 struct net_device *dev) 7844 { 7845 struct netdev_nested_priv priv = { 7846 .flags = 0, 7847 .data = NULL, 7848 }; 7849 7850 if (!new_dev) 7851 return; 7852 7853 if (old_dev && new_dev != old_dev) 7854 netdev_adjacent_dev_enable(dev, old_dev); 7855 7856 __netdev_upper_dev_unlink(new_dev, dev, &priv); 7857 } 7858 EXPORT_SYMBOL(netdev_adjacent_change_abort); 7859 7860 /** 7861 * netdev_bonding_info_change - Dispatch event about slave change 7862 * @dev: device 7863 * @bonding_info: info to dispatch 7864 * 7865 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7866 * The caller must hold the RTNL lock. 7867 */ 7868 void netdev_bonding_info_change(struct net_device *dev, 7869 struct netdev_bonding_info *bonding_info) 7870 { 7871 struct netdev_notifier_bonding_info info = { 7872 .info.dev = dev, 7873 }; 7874 7875 memcpy(&info.bonding_info, bonding_info, 7876 sizeof(struct netdev_bonding_info)); 7877 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7878 &info.info); 7879 } 7880 EXPORT_SYMBOL(netdev_bonding_info_change); 7881 7882 static int netdev_offload_xstats_enable_l3(struct net_device *dev, 7883 struct netlink_ext_ack *extack) 7884 { 7885 struct netdev_notifier_offload_xstats_info info = { 7886 .info.dev = dev, 7887 .info.extack = extack, 7888 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7889 }; 7890 int err; 7891 int rc; 7892 7893 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), 7894 GFP_KERNEL); 7895 if (!dev->offload_xstats_l3) 7896 return -ENOMEM; 7897 7898 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, 7899 NETDEV_OFFLOAD_XSTATS_DISABLE, 7900 &info.info); 7901 err = notifier_to_errno(rc); 7902 if (err) 7903 goto free_stats; 7904 7905 return 0; 7906 7907 free_stats: 7908 kfree(dev->offload_xstats_l3); 7909 dev->offload_xstats_l3 = NULL; 7910 return err; 7911 } 7912 7913 int netdev_offload_xstats_enable(struct net_device *dev, 7914 enum netdev_offload_xstats_type type, 7915 struct netlink_ext_ack *extack) 7916 { 7917 ASSERT_RTNL(); 7918 7919 if (netdev_offload_xstats_enabled(dev, type)) 7920 return -EALREADY; 7921 7922 switch (type) { 7923 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7924 return netdev_offload_xstats_enable_l3(dev, extack); 7925 } 7926 7927 WARN_ON(1); 7928 return -EINVAL; 7929 } 7930 EXPORT_SYMBOL(netdev_offload_xstats_enable); 7931 7932 static void netdev_offload_xstats_disable_l3(struct net_device *dev) 7933 { 7934 struct netdev_notifier_offload_xstats_info info = { 7935 .info.dev = dev, 7936 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, 7937 }; 7938 7939 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, 7940 &info.info); 7941 kfree(dev->offload_xstats_l3); 7942 dev->offload_xstats_l3 = NULL; 7943 } 7944 7945 int netdev_offload_xstats_disable(struct net_device *dev, 7946 enum netdev_offload_xstats_type type) 7947 { 7948 ASSERT_RTNL(); 7949 7950 if (!netdev_offload_xstats_enabled(dev, type)) 7951 return -EALREADY; 7952 7953 switch (type) { 7954 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7955 
netdev_offload_xstats_disable_l3(dev); 7956 return 0; 7957 } 7958 7959 WARN_ON(1); 7960 return -EINVAL; 7961 } 7962 EXPORT_SYMBOL(netdev_offload_xstats_disable); 7963 7964 static void netdev_offload_xstats_disable_all(struct net_device *dev) 7965 { 7966 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); 7967 } 7968 7969 static struct rtnl_hw_stats64 * 7970 netdev_offload_xstats_get_ptr(const struct net_device *dev, 7971 enum netdev_offload_xstats_type type) 7972 { 7973 switch (type) { 7974 case NETDEV_OFFLOAD_XSTATS_TYPE_L3: 7975 return dev->offload_xstats_l3; 7976 } 7977 7978 WARN_ON(1); 7979 return NULL; 7980 } 7981 7982 bool netdev_offload_xstats_enabled(const struct net_device *dev, 7983 enum netdev_offload_xstats_type type) 7984 { 7985 ASSERT_RTNL(); 7986 7987 return netdev_offload_xstats_get_ptr(dev, type); 7988 } 7989 EXPORT_SYMBOL(netdev_offload_xstats_enabled); 7990 7991 struct netdev_notifier_offload_xstats_ru { 7992 bool used; 7993 }; 7994 7995 struct netdev_notifier_offload_xstats_rd { 7996 struct rtnl_hw_stats64 stats; 7997 bool used; 7998 }; 7999 8000 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, 8001 const struct rtnl_hw_stats64 *src) 8002 { 8003 dest->rx_packets += src->rx_packets; 8004 dest->tx_packets += src->tx_packets; 8005 dest->rx_bytes += src->rx_bytes; 8006 dest->tx_bytes += src->tx_bytes; 8007 dest->rx_errors += src->rx_errors; 8008 dest->tx_errors += src->tx_errors; 8009 dest->rx_dropped += src->rx_dropped; 8010 dest->tx_dropped += src->tx_dropped; 8011 dest->multicast += src->multicast; 8012 } 8013 8014 static int netdev_offload_xstats_get_used(struct net_device *dev, 8015 enum netdev_offload_xstats_type type, 8016 bool *p_used, 8017 struct netlink_ext_ack *extack) 8018 { 8019 struct netdev_notifier_offload_xstats_ru report_used = {}; 8020 struct netdev_notifier_offload_xstats_info info = { 8021 .info.dev = dev, 8022 .info.extack = extack, 8023 .type = type, 8024 .report_used = &report_used, 8025 }; 8026 int rc; 8027 8028 WARN_ON(!netdev_offload_xstats_enabled(dev, type)); 8029 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, 8030 &info.info); 8031 *p_used = report_used.used; 8032 return notifier_to_errno(rc); 8033 } 8034 8035 static int netdev_offload_xstats_get_stats(struct net_device *dev, 8036 enum netdev_offload_xstats_type type, 8037 struct rtnl_hw_stats64 *p_stats, 8038 bool *p_used, 8039 struct netlink_ext_ack *extack) 8040 { 8041 struct netdev_notifier_offload_xstats_rd report_delta = {}; 8042 struct netdev_notifier_offload_xstats_info info = { 8043 .info.dev = dev, 8044 .info.extack = extack, 8045 .type = type, 8046 .report_delta = &report_delta, 8047 }; 8048 struct rtnl_hw_stats64 *stats; 8049 int rc; 8050 8051 stats = netdev_offload_xstats_get_ptr(dev, type); 8052 if (WARN_ON(!stats)) 8053 return -EINVAL; 8054 8055 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, 8056 &info.info); 8057 8058 /* Cache whatever we got, even if there was an error, otherwise the 8059 * successful stats retrievals would get lost. 
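	 * Several notifier subscribers may each have contributed a partial
	 * delta through netdev_offload_xstats_report_delta() before one of
	 * them failed; folding report_delta.stats into the cached counters
	 * before checking the error keeps those already-collected deltas.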
8060 */ 8061 netdev_hw_stats64_add(stats, &report_delta.stats); 8062 8063 if (p_stats) 8064 *p_stats = *stats; 8065 *p_used = report_delta.used; 8066 8067 return notifier_to_errno(rc); 8068 } 8069 8070 int netdev_offload_xstats_get(struct net_device *dev, 8071 enum netdev_offload_xstats_type type, 8072 struct rtnl_hw_stats64 *p_stats, bool *p_used, 8073 struct netlink_ext_ack *extack) 8074 { 8075 ASSERT_RTNL(); 8076 8077 if (p_stats) 8078 return netdev_offload_xstats_get_stats(dev, type, p_stats, 8079 p_used, extack); 8080 else 8081 return netdev_offload_xstats_get_used(dev, type, p_used, 8082 extack); 8083 } 8084 EXPORT_SYMBOL(netdev_offload_xstats_get); 8085 8086 void 8087 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, 8088 const struct rtnl_hw_stats64 *stats) 8089 { 8090 report_delta->used = true; 8091 netdev_hw_stats64_add(&report_delta->stats, stats); 8092 } 8093 EXPORT_SYMBOL(netdev_offload_xstats_report_delta); 8094 8095 void 8096 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) 8097 { 8098 report_used->used = true; 8099 } 8100 EXPORT_SYMBOL(netdev_offload_xstats_report_used); 8101 8102 void netdev_offload_xstats_push_delta(struct net_device *dev, 8103 enum netdev_offload_xstats_type type, 8104 const struct rtnl_hw_stats64 *p_stats) 8105 { 8106 struct rtnl_hw_stats64 *stats; 8107 8108 ASSERT_RTNL(); 8109 8110 stats = netdev_offload_xstats_get_ptr(dev, type); 8111 if (WARN_ON(!stats)) 8112 return; 8113 8114 netdev_hw_stats64_add(stats, p_stats); 8115 } 8116 EXPORT_SYMBOL(netdev_offload_xstats_push_delta); 8117 8118 /** 8119 * netdev_get_xmit_slave - Get the xmit slave of master device 8120 * @dev: device 8121 * @skb: The packet 8122 * @all_slaves: assume all the slaves are active 8123 * 8124 * The reference counters are not incremented so the caller must be 8125 * careful with locks. The caller must hold RCU lock. 8126 * %NULL is returned if no slave is found. 8127 */ 8128 8129 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8130 struct sk_buff *skb, 8131 bool all_slaves) 8132 { 8133 const struct net_device_ops *ops = dev->netdev_ops; 8134 8135 if (!ops->ndo_get_xmit_slave) 8136 return NULL; 8137 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8138 } 8139 EXPORT_SYMBOL(netdev_get_xmit_slave); 8140 8141 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8142 struct sock *sk) 8143 { 8144 const struct net_device_ops *ops = dev->netdev_ops; 8145 8146 if (!ops->ndo_sk_get_lower_dev) 8147 return NULL; 8148 return ops->ndo_sk_get_lower_dev(dev, sk); 8149 } 8150 8151 /** 8152 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8153 * @dev: device 8154 * @sk: the socket 8155 * 8156 * %NULL is returned if no lower device is found. 
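 *
 * Illustrative use (a sketch, not taken from this file): an upper device
 * such as a bond or team can be walked down to the physical device that
 * will actually carry a given socket's traffic:
 *
 *	lowest = netdev_sk_get_lowest_dev(bond_dev, sk);
 *	if (lowest != bond_dev)
 *		netdev_dbg(lowest, "socket egresses via this device\n");
 *
 * bond_dev, sk and lowest are hypothetical names; the walk relies on the
 * lower drivers implementing ndo_sk_get_lower_dev.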
8157 */ 8158 8159 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8160 struct sock *sk) 8161 { 8162 struct net_device *lower; 8163 8164 lower = netdev_sk_get_lower_dev(dev, sk); 8165 while (lower) { 8166 dev = lower; 8167 lower = netdev_sk_get_lower_dev(dev, sk); 8168 } 8169 8170 return dev; 8171 } 8172 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8173 8174 static void netdev_adjacent_add_links(struct net_device *dev) 8175 { 8176 struct netdev_adjacent *iter; 8177 8178 struct net *net = dev_net(dev); 8179 8180 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8181 if (!net_eq(net, dev_net(iter->dev))) 8182 continue; 8183 netdev_adjacent_sysfs_add(iter->dev, dev, 8184 &iter->dev->adj_list.lower); 8185 netdev_adjacent_sysfs_add(dev, iter->dev, 8186 &dev->adj_list.upper); 8187 } 8188 8189 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8190 if (!net_eq(net, dev_net(iter->dev))) 8191 continue; 8192 netdev_adjacent_sysfs_add(iter->dev, dev, 8193 &iter->dev->adj_list.upper); 8194 netdev_adjacent_sysfs_add(dev, iter->dev, 8195 &dev->adj_list.lower); 8196 } 8197 } 8198 8199 static void netdev_adjacent_del_links(struct net_device *dev) 8200 { 8201 struct netdev_adjacent *iter; 8202 8203 struct net *net = dev_net(dev); 8204 8205 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8206 if (!net_eq(net, dev_net(iter->dev))) 8207 continue; 8208 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8209 &iter->dev->adj_list.lower); 8210 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8211 &dev->adj_list.upper); 8212 } 8213 8214 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8215 if (!net_eq(net, dev_net(iter->dev))) 8216 continue; 8217 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8218 &iter->dev->adj_list.upper); 8219 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8220 &dev->adj_list.lower); 8221 } 8222 } 8223 8224 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8225 { 8226 struct netdev_adjacent *iter; 8227 8228 struct net *net = dev_net(dev); 8229 8230 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8231 if (!net_eq(net, dev_net(iter->dev))) 8232 continue; 8233 netdev_adjacent_sysfs_del(iter->dev, oldname, 8234 &iter->dev->adj_list.lower); 8235 netdev_adjacent_sysfs_add(iter->dev, dev, 8236 &iter->dev->adj_list.lower); 8237 } 8238 8239 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8240 if (!net_eq(net, dev_net(iter->dev))) 8241 continue; 8242 netdev_adjacent_sysfs_del(iter->dev, oldname, 8243 &iter->dev->adj_list.upper); 8244 netdev_adjacent_sysfs_add(iter->dev, dev, 8245 &iter->dev->adj_list.upper); 8246 } 8247 } 8248 8249 void *netdev_lower_dev_get_private(struct net_device *dev, 8250 struct net_device *lower_dev) 8251 { 8252 struct netdev_adjacent *lower; 8253 8254 if (!lower_dev) 8255 return NULL; 8256 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8257 if (!lower) 8258 return NULL; 8259 8260 return lower->private; 8261 } 8262 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8263 8264 8265 /** 8266 * netdev_lower_state_changed - Dispatch event about lower device state change 8267 * @lower_dev: device 8268 * @lower_state_info: state to dispatch 8269 * 8270 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8271 * The caller must hold the RTNL lock. 
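 *
 * Illustrative use (a sketch, not taken from this file): a LAG/bonding
 * driver can tell interested listeners, e.g. switch drivers, that one of
 * its lower devices changed state:
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up    = true,
 *		.tx_enabled = true,
 *	};
 *
 *	netdev_lower_state_changed(slave_dev, &info);
 *
 * The payload type is a contract between the caller and the notifier
 * listeners; this helper only forwards the pointer.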
8272 */ 8273 void netdev_lower_state_changed(struct net_device *lower_dev, 8274 void *lower_state_info) 8275 { 8276 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8277 .info.dev = lower_dev, 8278 }; 8279 8280 ASSERT_RTNL(); 8281 changelowerstate_info.lower_state_info = lower_state_info; 8282 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8283 &changelowerstate_info.info); 8284 } 8285 EXPORT_SYMBOL(netdev_lower_state_changed); 8286 8287 static void dev_change_rx_flags(struct net_device *dev, int flags) 8288 { 8289 const struct net_device_ops *ops = dev->netdev_ops; 8290 8291 if (ops->ndo_change_rx_flags) 8292 ops->ndo_change_rx_flags(dev, flags); 8293 } 8294 8295 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8296 { 8297 unsigned int old_flags = dev->flags; 8298 kuid_t uid; 8299 kgid_t gid; 8300 8301 ASSERT_RTNL(); 8302 8303 dev->flags |= IFF_PROMISC; 8304 dev->promiscuity += inc; 8305 if (dev->promiscuity == 0) { 8306 /* 8307 * Avoid overflow. 8308 * If inc causes overflow, untouch promisc and return error. 8309 */ 8310 if (inc < 0) 8311 dev->flags &= ~IFF_PROMISC; 8312 else { 8313 dev->promiscuity -= inc; 8314 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 8315 return -EOVERFLOW; 8316 } 8317 } 8318 if (dev->flags != old_flags) { 8319 netdev_info(dev, "%s promiscuous mode\n", 8320 dev->flags & IFF_PROMISC ? "entered" : "left"); 8321 if (audit_enabled) { 8322 current_uid_gid(&uid, &gid); 8323 audit_log(audit_context(), GFP_ATOMIC, 8324 AUDIT_ANOM_PROMISCUOUS, 8325 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8326 dev->name, (dev->flags & IFF_PROMISC), 8327 (old_flags & IFF_PROMISC), 8328 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8329 from_kuid(&init_user_ns, uid), 8330 from_kgid(&init_user_ns, gid), 8331 audit_get_sessionid(current)); 8332 } 8333 8334 dev_change_rx_flags(dev, IFF_PROMISC); 8335 } 8336 if (notify) 8337 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); 8338 return 0; 8339 } 8340 8341 /** 8342 * dev_set_promiscuity - update promiscuity count on a device 8343 * @dev: device 8344 * @inc: modifier 8345 * 8346 * Add or remove promiscuity from a device. While the count in the device 8347 * remains above zero the interface remains promiscuous. Once it hits zero 8348 * the device reverts back to normal filtering operation. A negative inc 8349 * value is used to drop promiscuity on the device. 8350 * Return 0 if successful or a negative errno code on error. 8351 */ 8352 int dev_set_promiscuity(struct net_device *dev, int inc) 8353 { 8354 unsigned int old_flags = dev->flags; 8355 int err; 8356 8357 err = __dev_set_promiscuity(dev, inc, true); 8358 if (err < 0) 8359 return err; 8360 if (dev->flags != old_flags) 8361 dev_set_rx_mode(dev); 8362 return err; 8363 } 8364 EXPORT_SYMBOL(dev_set_promiscuity); 8365 8366 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8367 { 8368 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8369 8370 ASSERT_RTNL(); 8371 8372 dev->flags |= IFF_ALLMULTI; 8373 dev->allmulti += inc; 8374 if (dev->allmulti == 0) { 8375 /* 8376 * Avoid overflow. 8377 * If inc causes overflow, untouch allmulti and return error. 8378 */ 8379 if (inc < 0) 8380 dev->flags &= ~IFF_ALLMULTI; 8381 else { 8382 dev->allmulti -= inc; 8383 netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); 8384 return -EOVERFLOW; 8385 } 8386 } 8387 if (dev->flags ^ old_flags) { 8388 netdev_info(dev, "%s allmulticast mode\n", 8389 dev->flags & IFF_ALLMULTI ? "entered" : "left"); 8390 dev_change_rx_flags(dev, IFF_ALLMULTI); 8391 dev_set_rx_mode(dev); 8392 if (notify) 8393 __dev_notify_flags(dev, old_flags, 8394 dev->gflags ^ old_gflags, 0, NULL); 8395 } 8396 return 0; 8397 } 8398 8399 /** 8400 * dev_set_allmulti - update allmulti count on a device 8401 * @dev: device 8402 * @inc: modifier 8403 * 8404 * Add or remove reception of all multicast frames to a device. While the 8405 * count in the device remains above zero the interface remains listening 8406 * to all interfaces. Once it hits zero the device reverts back to normal 8407 * filtering operation. A negative @inc value is used to drop the counter 8408 * when releasing a resource needing all multicasts. 8409 * Return 0 if successful or a negative errno code on error. 8410 */ 8411 8412 int dev_set_allmulti(struct net_device *dev, int inc) 8413 { 8414 return __dev_set_allmulti(dev, inc, true); 8415 } 8416 EXPORT_SYMBOL(dev_set_allmulti); 8417 8418 /* 8419 * Upload unicast and multicast address lists to device and 8420 * configure RX filtering. When the device doesn't support unicast 8421 * filtering it is put in promiscuous mode while unicast addresses 8422 * are present. 8423 */ 8424 void __dev_set_rx_mode(struct net_device *dev) 8425 { 8426 const struct net_device_ops *ops = dev->netdev_ops; 8427 8428 /* dev_open will call this function so the list will stay sane. */ 8429 if (!(dev->flags&IFF_UP)) 8430 return; 8431 8432 if (!netif_device_present(dev)) 8433 return; 8434 8435 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8436 /* Unicast addresses changes may only happen under the rtnl, 8437 * therefore calling __dev_set_promiscuity here is safe. 8438 */ 8439 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8440 __dev_set_promiscuity(dev, 1, false); 8441 dev->uc_promisc = true; 8442 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8443 __dev_set_promiscuity(dev, -1, false); 8444 dev->uc_promisc = false; 8445 } 8446 } 8447 8448 if (ops->ndo_set_rx_mode) 8449 ops->ndo_set_rx_mode(dev); 8450 } 8451 8452 void dev_set_rx_mode(struct net_device *dev) 8453 { 8454 netif_addr_lock_bh(dev); 8455 __dev_set_rx_mode(dev); 8456 netif_addr_unlock_bh(dev); 8457 } 8458 8459 /** 8460 * dev_get_flags - get flags reported to userspace 8461 * @dev: device 8462 * 8463 * Get the combination of flag bits exported through APIs to userspace. 8464 */ 8465 unsigned int dev_get_flags(const struct net_device *dev) 8466 { 8467 unsigned int flags; 8468 8469 flags = (dev->flags & ~(IFF_PROMISC | 8470 IFF_ALLMULTI | 8471 IFF_RUNNING | 8472 IFF_LOWER_UP | 8473 IFF_DORMANT)) | 8474 (dev->gflags & (IFF_PROMISC | 8475 IFF_ALLMULTI)); 8476 8477 if (netif_running(dev)) { 8478 if (netif_oper_up(dev)) 8479 flags |= IFF_RUNNING; 8480 if (netif_carrier_ok(dev)) 8481 flags |= IFF_LOWER_UP; 8482 if (netif_dormant(dev)) 8483 flags |= IFF_DORMANT; 8484 } 8485 8486 return flags; 8487 } 8488 EXPORT_SYMBOL(dev_get_flags); 8489 8490 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8491 struct netlink_ext_ack *extack) 8492 { 8493 unsigned int old_flags = dev->flags; 8494 int ret; 8495 8496 ASSERT_RTNL(); 8497 8498 /* 8499 * Set the flags on our device. 
8500 */ 8501 8502 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8503 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8504 IFF_AUTOMEDIA)) | 8505 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8506 IFF_ALLMULTI)); 8507 8508 /* 8509 * Load in the correct multicast list now the flags have changed. 8510 */ 8511 8512 if ((old_flags ^ flags) & IFF_MULTICAST) 8513 dev_change_rx_flags(dev, IFF_MULTICAST); 8514 8515 dev_set_rx_mode(dev); 8516 8517 /* 8518 * Have we downed the interface. We handle IFF_UP ourselves 8519 * according to user attempts to set it, rather than blindly 8520 * setting it. 8521 */ 8522 8523 ret = 0; 8524 if ((old_flags ^ flags) & IFF_UP) { 8525 if (old_flags & IFF_UP) 8526 __dev_close(dev); 8527 else 8528 ret = __dev_open(dev, extack); 8529 } 8530 8531 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8532 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8533 unsigned int old_flags = dev->flags; 8534 8535 dev->gflags ^= IFF_PROMISC; 8536 8537 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8538 if (dev->flags != old_flags) 8539 dev_set_rx_mode(dev); 8540 } 8541 8542 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8543 * is important. Some (broken) drivers set IFF_PROMISC, when 8544 * IFF_ALLMULTI is requested not asking us and not reporting. 8545 */ 8546 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8547 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8548 8549 dev->gflags ^= IFF_ALLMULTI; 8550 __dev_set_allmulti(dev, inc, false); 8551 } 8552 8553 return ret; 8554 } 8555 8556 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8557 unsigned int gchanges, u32 portid, 8558 const struct nlmsghdr *nlh) 8559 { 8560 unsigned int changes = dev->flags ^ old_flags; 8561 8562 if (gchanges) 8563 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh); 8564 8565 if (changes & IFF_UP) { 8566 if (dev->flags & IFF_UP) 8567 call_netdevice_notifiers(NETDEV_UP, dev); 8568 else 8569 call_netdevice_notifiers(NETDEV_DOWN, dev); 8570 } 8571 8572 if (dev->flags & IFF_UP && 8573 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8574 struct netdev_notifier_change_info change_info = { 8575 .info = { 8576 .dev = dev, 8577 }, 8578 .flags_changed = changes, 8579 }; 8580 8581 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8582 } 8583 } 8584 8585 /** 8586 * dev_change_flags - change device settings 8587 * @dev: device 8588 * @flags: device state flags 8589 * @extack: netlink extended ack 8590 * 8591 * Change settings on device based state flags. The flags are 8592 * in the userspace exported format. 
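 *
 * Illustrative use (a sketch, not taken from this file): to bring a
 * device administratively up while preserving its other flags, a caller
 * holding the RTNL lock could do:
 *
 *	unsigned int flags = dev_get_flags(dev) | IFF_UP;
 *	int err = dev_change_flags(dev, flags, NULL);
 *
 * dev_get_flags() reports the userspace view of the flags, which is the
 * same format this function expects.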
8593 */ 8594 int dev_change_flags(struct net_device *dev, unsigned int flags, 8595 struct netlink_ext_ack *extack) 8596 { 8597 int ret; 8598 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8599 8600 ret = __dev_change_flags(dev, flags, extack); 8601 if (ret < 0) 8602 return ret; 8603 8604 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8605 __dev_notify_flags(dev, old_flags, changes, 0, NULL); 8606 return ret; 8607 } 8608 EXPORT_SYMBOL(dev_change_flags); 8609 8610 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8611 { 8612 const struct net_device_ops *ops = dev->netdev_ops; 8613 8614 if (ops->ndo_change_mtu) 8615 return ops->ndo_change_mtu(dev, new_mtu); 8616 8617 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8618 WRITE_ONCE(dev->mtu, new_mtu); 8619 return 0; 8620 } 8621 EXPORT_SYMBOL(__dev_set_mtu); 8622 8623 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8624 struct netlink_ext_ack *extack) 8625 { 8626 /* MTU must be positive, and in range */ 8627 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8628 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8629 return -EINVAL; 8630 } 8631 8632 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8633 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8634 return -EINVAL; 8635 } 8636 return 0; 8637 } 8638 8639 /** 8640 * dev_set_mtu_ext - Change maximum transfer unit 8641 * @dev: device 8642 * @new_mtu: new transfer unit 8643 * @extack: netlink extended ack 8644 * 8645 * Change the maximum transfer size of the network device. 8646 */ 8647 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8648 struct netlink_ext_ack *extack) 8649 { 8650 int err, orig_mtu; 8651 8652 if (new_mtu == dev->mtu) 8653 return 0; 8654 8655 err = dev_validate_mtu(dev, new_mtu, extack); 8656 if (err) 8657 return err; 8658 8659 if (!netif_device_present(dev)) 8660 return -ENODEV; 8661 8662 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8663 err = notifier_to_errno(err); 8664 if (err) 8665 return err; 8666 8667 orig_mtu = dev->mtu; 8668 err = __dev_set_mtu(dev, new_mtu); 8669 8670 if (!err) { 8671 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8672 orig_mtu); 8673 err = notifier_to_errno(err); 8674 if (err) { 8675 /* setting mtu back and notifying everyone again, 8676 * so that they have a chance to revert changes. 
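 *
 * Example (illustrative): if orig_mtu was 1500 and a notifier rejects
 * new_mtu 9000, the MTU is restored to 1500 below and a second
 * NETDEV_CHANGEMTU is sent with 9000 as the "previous" value, so
 * listeners that already adapted to 9000 get a chance to undo it.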
8677 */ 8678 __dev_set_mtu(dev, orig_mtu); 8679 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8680 new_mtu); 8681 } 8682 } 8683 return err; 8684 } 8685 8686 int dev_set_mtu(struct net_device *dev, int new_mtu) 8687 { 8688 struct netlink_ext_ack extack; 8689 int err; 8690 8691 memset(&extack, 0, sizeof(extack)); 8692 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8693 if (err && extack._msg) 8694 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8695 return err; 8696 } 8697 EXPORT_SYMBOL(dev_set_mtu); 8698 8699 /** 8700 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8701 * @dev: device 8702 * @new_len: new tx queue length 8703 */ 8704 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8705 { 8706 unsigned int orig_len = dev->tx_queue_len; 8707 int res; 8708 8709 if (new_len != (unsigned int)new_len) 8710 return -ERANGE; 8711 8712 if (new_len != orig_len) { 8713 dev->tx_queue_len = new_len; 8714 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8715 res = notifier_to_errno(res); 8716 if (res) 8717 goto err_rollback; 8718 res = dev_qdisc_change_tx_queue_len(dev); 8719 if (res) 8720 goto err_rollback; 8721 } 8722 8723 return 0; 8724 8725 err_rollback: 8726 netdev_err(dev, "refused to change device tx_queue_len\n"); 8727 dev->tx_queue_len = orig_len; 8728 return res; 8729 } 8730 8731 /** 8732 * dev_set_group - Change group this device belongs to 8733 * @dev: device 8734 * @new_group: group this device should belong to 8735 */ 8736 void dev_set_group(struct net_device *dev, int new_group) 8737 { 8738 dev->group = new_group; 8739 } 8740 8741 /** 8742 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8743 * @dev: device 8744 * @addr: new address 8745 * @extack: netlink extended ack 8746 */ 8747 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8748 struct netlink_ext_ack *extack) 8749 { 8750 struct netdev_notifier_pre_changeaddr_info info = { 8751 .info.dev = dev, 8752 .info.extack = extack, 8753 .dev_addr = addr, 8754 }; 8755 int rc; 8756 8757 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8758 return notifier_to_errno(rc); 8759 } 8760 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8761 8762 /** 8763 * dev_set_mac_address - Change Media Access Control Address 8764 * @dev: device 8765 * @sa: new address 8766 * @extack: netlink extended ack 8767 * 8768 * Change the hardware (MAC) address of the device 8769 */ 8770 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8771 struct netlink_ext_ack *extack) 8772 { 8773 const struct net_device_ops *ops = dev->netdev_ops; 8774 int err; 8775 8776 if (!ops->ndo_set_mac_address) 8777 return -EOPNOTSUPP; 8778 if (sa->sa_family != dev->type) 8779 return -EINVAL; 8780 if (!netif_device_present(dev)) 8781 return -ENODEV; 8782 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8783 if (err) 8784 return err; 8785 err = ops->ndo_set_mac_address(dev, sa); 8786 if (err) 8787 return err; 8788 dev->addr_assign_type = NET_ADDR_SET; 8789 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8790 add_device_randomness(dev->dev_addr, dev->addr_len); 8791 return 0; 8792 } 8793 EXPORT_SYMBOL(dev_set_mac_address); 8794 8795 static DECLARE_RWSEM(dev_addr_sem); 8796 8797 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 8798 struct netlink_ext_ack *extack) 8799 { 8800 int ret; 8801 8802 down_write(&dev_addr_sem); 8803 ret = dev_set_mac_address(dev, sa, extack); 8804 up_write(&dev_addr_sem); 8805 return ret; 8806 } 
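
/* Illustrative use (a sketch, not part of the original file): a caller
 * that wants its MAC change serialized against concurrent
 * dev_get_mac_address() readers via dev_addr_sem, typically with the
 * RTNL lock already held, could do:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address_user(dev, &sa, NULL);
 *
 * new_addr and err are hypothetical; dev_set_mac_address() itself checks
 * sa_family against dev->type and raises NETDEV_PRE_CHANGEADDR and
 * NETDEV_CHANGEADDR notifications.
 */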
8807 EXPORT_SYMBOL(dev_set_mac_address_user); 8808 8809 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 8810 { 8811 size_t size = sizeof(sa->sa_data_min); 8812 struct net_device *dev; 8813 int ret = 0; 8814 8815 down_read(&dev_addr_sem); 8816 rcu_read_lock(); 8817 8818 dev = dev_get_by_name_rcu(net, dev_name); 8819 if (!dev) { 8820 ret = -ENODEV; 8821 goto unlock; 8822 } 8823 if (!dev->addr_len) 8824 memset(sa->sa_data, 0, size); 8825 else 8826 memcpy(sa->sa_data, dev->dev_addr, 8827 min_t(size_t, size, dev->addr_len)); 8828 sa->sa_family = dev->type; 8829 8830 unlock: 8831 rcu_read_unlock(); 8832 up_read(&dev_addr_sem); 8833 return ret; 8834 } 8835 EXPORT_SYMBOL(dev_get_mac_address); 8836 8837 /** 8838 * dev_change_carrier - Change device carrier 8839 * @dev: device 8840 * @new_carrier: new value 8841 * 8842 * Change device carrier 8843 */ 8844 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8845 { 8846 const struct net_device_ops *ops = dev->netdev_ops; 8847 8848 if (!ops->ndo_change_carrier) 8849 return -EOPNOTSUPP; 8850 if (!netif_device_present(dev)) 8851 return -ENODEV; 8852 return ops->ndo_change_carrier(dev, new_carrier); 8853 } 8854 8855 /** 8856 * dev_get_phys_port_id - Get device physical port ID 8857 * @dev: device 8858 * @ppid: port ID 8859 * 8860 * Get device physical port ID 8861 */ 8862 int dev_get_phys_port_id(struct net_device *dev, 8863 struct netdev_phys_item_id *ppid) 8864 { 8865 const struct net_device_ops *ops = dev->netdev_ops; 8866 8867 if (!ops->ndo_get_phys_port_id) 8868 return -EOPNOTSUPP; 8869 return ops->ndo_get_phys_port_id(dev, ppid); 8870 } 8871 8872 /** 8873 * dev_get_phys_port_name - Get device physical port name 8874 * @dev: device 8875 * @name: port name 8876 * @len: limit of bytes to copy to name 8877 * 8878 * Get device physical port name 8879 */ 8880 int dev_get_phys_port_name(struct net_device *dev, 8881 char *name, size_t len) 8882 { 8883 const struct net_device_ops *ops = dev->netdev_ops; 8884 int err; 8885 8886 if (ops->ndo_get_phys_port_name) { 8887 err = ops->ndo_get_phys_port_name(dev, name, len); 8888 if (err != -EOPNOTSUPP) 8889 return err; 8890 } 8891 return devlink_compat_phys_port_name_get(dev, name, len); 8892 } 8893 8894 /** 8895 * dev_get_port_parent_id - Get the device's port parent identifier 8896 * @dev: network device 8897 * @ppid: pointer to a storage for the port's parent identifier 8898 * @recurse: allow/disallow recursion to lower devices 8899 * 8900 * Get the devices's port parent identifier 8901 */ 8902 int dev_get_port_parent_id(struct net_device *dev, 8903 struct netdev_phys_item_id *ppid, 8904 bool recurse) 8905 { 8906 const struct net_device_ops *ops = dev->netdev_ops; 8907 struct netdev_phys_item_id first = { }; 8908 struct net_device *lower_dev; 8909 struct list_head *iter; 8910 int err; 8911 8912 if (ops->ndo_get_port_parent_id) { 8913 err = ops->ndo_get_port_parent_id(dev, ppid); 8914 if (err != -EOPNOTSUPP) 8915 return err; 8916 } 8917 8918 err = devlink_compat_switch_id_get(dev, ppid); 8919 if (!recurse || err != -EOPNOTSUPP) 8920 return err; 8921 8922 netdev_for_each_lower_dev(dev, lower_dev, iter) { 8923 err = dev_get_port_parent_id(lower_dev, ppid, true); 8924 if (err) 8925 break; 8926 if (!first.id_len) 8927 first = *ppid; 8928 else if (memcmp(&first, ppid, sizeof(*ppid))) 8929 return -EOPNOTSUPP; 8930 } 8931 8932 return err; 8933 } 8934 EXPORT_SYMBOL(dev_get_port_parent_id); 8935 8936 /** 8937 * netdev_port_same_parent_id - Indicate if two network devices have 8938 
* the same port parent identifier 8939 * @a: first network device 8940 * @b: second network device 8941 */ 8942 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 8943 { 8944 struct netdev_phys_item_id a_id = { }; 8945 struct netdev_phys_item_id b_id = { }; 8946 8947 if (dev_get_port_parent_id(a, &a_id, true) || 8948 dev_get_port_parent_id(b, &b_id, true)) 8949 return false; 8950 8951 return netdev_phys_item_id_same(&a_id, &b_id); 8952 } 8953 EXPORT_SYMBOL(netdev_port_same_parent_id); 8954 8955 /** 8956 * dev_change_proto_down - set carrier according to proto_down. 8957 * 8958 * @dev: device 8959 * @proto_down: new value 8960 */ 8961 int dev_change_proto_down(struct net_device *dev, bool proto_down) 8962 { 8963 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) 8964 return -EOPNOTSUPP; 8965 if (!netif_device_present(dev)) 8966 return -ENODEV; 8967 if (proto_down) 8968 netif_carrier_off(dev); 8969 else 8970 netif_carrier_on(dev); 8971 dev->proto_down = proto_down; 8972 return 0; 8973 } 8974 8975 /** 8976 * dev_change_proto_down_reason - proto down reason 8977 * 8978 * @dev: device 8979 * @mask: proto down mask 8980 * @value: proto down value 8981 */ 8982 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 8983 u32 value) 8984 { 8985 int b; 8986 8987 if (!mask) { 8988 dev->proto_down_reason = value; 8989 } else { 8990 for_each_set_bit(b, &mask, 32) { 8991 if (value & (1 << b)) 8992 dev->proto_down_reason |= BIT(b); 8993 else 8994 dev->proto_down_reason &= ~BIT(b); 8995 } 8996 } 8997 } 8998 8999 struct bpf_xdp_link { 9000 struct bpf_link link; 9001 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9002 int flags; 9003 }; 9004 9005 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9006 { 9007 if (flags & XDP_FLAGS_HW_MODE) 9008 return XDP_MODE_HW; 9009 if (flags & XDP_FLAGS_DRV_MODE) 9010 return XDP_MODE_DRV; 9011 if (flags & XDP_FLAGS_SKB_MODE) 9012 return XDP_MODE_SKB; 9013 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; 9014 } 9015 9016 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9017 { 9018 switch (mode) { 9019 case XDP_MODE_SKB: 9020 return generic_xdp_install; 9021 case XDP_MODE_DRV: 9022 case XDP_MODE_HW: 9023 return dev->netdev_ops->ndo_bpf; 9024 default: 9025 return NULL; 9026 } 9027 } 9028 9029 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9030 enum bpf_xdp_mode mode) 9031 { 9032 return dev->xdp_state[mode].link; 9033 } 9034 9035 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9036 enum bpf_xdp_mode mode) 9037 { 9038 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9039 9040 if (link) 9041 return link->link.prog; 9042 return dev->xdp_state[mode].prog; 9043 } 9044 9045 u8 dev_xdp_prog_count(struct net_device *dev) 9046 { 9047 u8 count = 0; 9048 int i; 9049 9050 for (i = 0; i < __MAX_XDP_MODE; i++) 9051 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9052 count++; 9053 return count; 9054 } 9055 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9056 9057 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9058 { 9059 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9060 9061 return prog ? 
prog->aux->id : 0; 9062 } 9063 9064 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9065 struct bpf_xdp_link *link) 9066 { 9067 dev->xdp_state[mode].link = link; 9068 dev->xdp_state[mode].prog = NULL; 9069 } 9070 9071 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9072 struct bpf_prog *prog) 9073 { 9074 dev->xdp_state[mode].link = NULL; 9075 dev->xdp_state[mode].prog = prog; 9076 } 9077 9078 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9079 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9080 u32 flags, struct bpf_prog *prog) 9081 { 9082 struct netdev_bpf xdp; 9083 int err; 9084 9085 memset(&xdp, 0, sizeof(xdp)); 9086 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9087 xdp.extack = extack; 9088 xdp.flags = flags; 9089 xdp.prog = prog; 9090 9091 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9092 * "moved" into driver), so they don't increment it on their own, but 9093 * they do decrement refcnt when program is detached or replaced. 9094 * Given net_device also owns link/prog, we need to bump refcnt here 9095 * to prevent drivers from underflowing it. 9096 */ 9097 if (prog) 9098 bpf_prog_inc(prog); 9099 err = bpf_op(dev, &xdp); 9100 if (err) { 9101 if (prog) 9102 bpf_prog_put(prog); 9103 return err; 9104 } 9105 9106 if (mode != XDP_MODE_HW) 9107 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9108 9109 return 0; 9110 } 9111 9112 static void dev_xdp_uninstall(struct net_device *dev) 9113 { 9114 struct bpf_xdp_link *link; 9115 struct bpf_prog *prog; 9116 enum bpf_xdp_mode mode; 9117 bpf_op_t bpf_op; 9118 9119 ASSERT_RTNL(); 9120 9121 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9122 prog = dev_xdp_prog(dev, mode); 9123 if (!prog) 9124 continue; 9125 9126 bpf_op = dev_xdp_bpf_op(dev, mode); 9127 if (!bpf_op) 9128 continue; 9129 9130 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9131 9132 /* auto-detach link from net device */ 9133 link = dev_xdp_link(dev, mode); 9134 if (link) 9135 link->dev = NULL; 9136 else 9137 bpf_prog_put(prog); 9138 9139 dev_xdp_set_link(dev, mode, NULL); 9140 } 9141 } 9142 9143 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9144 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9145 struct bpf_prog *old_prog, u32 flags) 9146 { 9147 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9148 struct bpf_prog *cur_prog; 9149 struct net_device *upper; 9150 struct list_head *iter; 9151 enum bpf_xdp_mode mode; 9152 bpf_op_t bpf_op; 9153 int err; 9154 9155 ASSERT_RTNL(); 9156 9157 /* either link or prog attachment, never both */ 9158 if (link && (new_prog || old_prog)) 9159 return -EINVAL; 9160 /* link supports only XDP mode flags */ 9161 if (link && (flags & ~XDP_FLAGS_MODES)) { 9162 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9163 return -EINVAL; 9164 } 9165 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9166 if (num_modes > 1) { 9167 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9168 return -EINVAL; 9169 } 9170 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9171 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9172 NL_SET_ERR_MSG(extack, 9173 "More than one program loaded, unset mode is ambiguous"); 9174 return -EINVAL; 9175 } 9176 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9177 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9178 NL_SET_ERR_MSG(extack, 
"XDP_FLAGS_REPLACE is not specified"); 9179 return -EINVAL; 9180 } 9181 9182 mode = dev_xdp_mode(dev, flags); 9183 /* can't replace attached link */ 9184 if (dev_xdp_link(dev, mode)) { 9185 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9186 return -EBUSY; 9187 } 9188 9189 /* don't allow if an upper device already has a program */ 9190 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9191 if (dev_xdp_prog_count(upper) > 0) { 9192 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9193 return -EEXIST; 9194 } 9195 } 9196 9197 cur_prog = dev_xdp_prog(dev, mode); 9198 /* can't replace attached prog with link */ 9199 if (link && cur_prog) { 9200 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9201 return -EBUSY; 9202 } 9203 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9204 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9205 return -EEXIST; 9206 } 9207 9208 /* put effective new program into new_prog */ 9209 if (link) 9210 new_prog = link->link.prog; 9211 9212 if (new_prog) { 9213 bool offload = mode == XDP_MODE_HW; 9214 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9215 ? XDP_MODE_DRV : XDP_MODE_SKB; 9216 9217 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9218 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9219 return -EBUSY; 9220 } 9221 if (!offload && dev_xdp_prog(dev, other_mode)) { 9222 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9223 return -EEXIST; 9224 } 9225 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) { 9226 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported"); 9227 return -EINVAL; 9228 } 9229 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) { 9230 NL_SET_ERR_MSG(extack, "Program bound to different device"); 9231 return -EINVAL; 9232 } 9233 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9234 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9235 return -EINVAL; 9236 } 9237 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9238 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9239 return -EINVAL; 9240 } 9241 } 9242 9243 /* don't call drivers if the effective program didn't change */ 9244 if (new_prog != cur_prog) { 9245 bpf_op = dev_xdp_bpf_op(dev, mode); 9246 if (!bpf_op) { 9247 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9248 return -EOPNOTSUPP; 9249 } 9250 9251 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9252 if (err) 9253 return err; 9254 } 9255 9256 if (link) 9257 dev_xdp_set_link(dev, mode, link); 9258 else 9259 dev_xdp_set_prog(dev, mode, new_prog); 9260 if (cur_prog) 9261 bpf_prog_put(cur_prog); 9262 9263 return 0; 9264 } 9265 9266 static int dev_xdp_attach_link(struct net_device *dev, 9267 struct netlink_ext_ack *extack, 9268 struct bpf_xdp_link *link) 9269 { 9270 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9271 } 9272 9273 static int dev_xdp_detach_link(struct net_device *dev, 9274 struct netlink_ext_ack *extack, 9275 struct bpf_xdp_link *link) 9276 { 9277 enum bpf_xdp_mode mode; 9278 bpf_op_t bpf_op; 9279 9280 ASSERT_RTNL(); 9281 9282 mode = dev_xdp_mode(dev, link->flags); 9283 if (dev_xdp_link(dev, mode) != link) 9284 return -EINVAL; 9285 9286 bpf_op = dev_xdp_bpf_op(dev, mode); 9287 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9288 dev_xdp_set_link(dev, 
mode, NULL); 9289 return 0; 9290 } 9291 9292 static void bpf_xdp_link_release(struct bpf_link *link) 9293 { 9294 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9295 9296 rtnl_lock(); 9297 9298 /* if racing with net_device's tear down, xdp_link->dev might be 9299 * already NULL, in which case link was already auto-detached 9300 */ 9301 if (xdp_link->dev) { 9302 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9303 xdp_link->dev = NULL; 9304 } 9305 9306 rtnl_unlock(); 9307 } 9308 9309 static int bpf_xdp_link_detach(struct bpf_link *link) 9310 { 9311 bpf_xdp_link_release(link); 9312 return 0; 9313 } 9314 9315 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9316 { 9317 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9318 9319 kfree(xdp_link); 9320 } 9321 9322 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9323 struct seq_file *seq) 9324 { 9325 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9326 u32 ifindex = 0; 9327 9328 rtnl_lock(); 9329 if (xdp_link->dev) 9330 ifindex = xdp_link->dev->ifindex; 9331 rtnl_unlock(); 9332 9333 seq_printf(seq, "ifindex:\t%u\n", ifindex); 9334 } 9335 9336 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9337 struct bpf_link_info *info) 9338 { 9339 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9340 u32 ifindex = 0; 9341 9342 rtnl_lock(); 9343 if (xdp_link->dev) 9344 ifindex = xdp_link->dev->ifindex; 9345 rtnl_unlock(); 9346 9347 info->xdp.ifindex = ifindex; 9348 return 0; 9349 } 9350 9351 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9352 struct bpf_prog *old_prog) 9353 { 9354 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9355 enum bpf_xdp_mode mode; 9356 bpf_op_t bpf_op; 9357 int err = 0; 9358 9359 rtnl_lock(); 9360 9361 /* link might have been auto-released already, so fail */ 9362 if (!xdp_link->dev) { 9363 err = -ENOLINK; 9364 goto out_unlock; 9365 } 9366 9367 if (old_prog && link->prog != old_prog) { 9368 err = -EPERM; 9369 goto out_unlock; 9370 } 9371 old_prog = link->prog; 9372 if (old_prog->type != new_prog->type || 9373 old_prog->expected_attach_type != new_prog->expected_attach_type) { 9374 err = -EINVAL; 9375 goto out_unlock; 9376 } 9377 9378 if (old_prog == new_prog) { 9379 /* no-op, don't disturb drivers */ 9380 bpf_prog_put(new_prog); 9381 goto out_unlock; 9382 } 9383 9384 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9385 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9386 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9387 xdp_link->flags, new_prog); 9388 if (err) 9389 goto out_unlock; 9390 9391 old_prog = xchg(&link->prog, new_prog); 9392 bpf_prog_put(old_prog); 9393 9394 out_unlock: 9395 rtnl_unlock(); 9396 return err; 9397 } 9398 9399 static const struct bpf_link_ops bpf_xdp_link_lops = { 9400 .release = bpf_xdp_link_release, 9401 .dealloc = bpf_xdp_link_dealloc, 9402 .detach = bpf_xdp_link_detach, 9403 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9404 .fill_link_info = bpf_xdp_link_fill_link_info, 9405 .update_prog = bpf_xdp_link_update, 9406 }; 9407 9408 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9409 { 9410 struct net *net = current->nsproxy->net_ns; 9411 struct bpf_link_primer link_primer; 9412 struct bpf_xdp_link *link; 9413 struct net_device *dev; 9414 int err, fd; 9415 9416 rtnl_lock(); 9417 dev = dev_get_by_index(net, 
attr->link_create.target_ifindex); 9418 if (!dev) { 9419 rtnl_unlock(); 9420 return -EINVAL; 9421 } 9422 9423 link = kzalloc(sizeof(*link), GFP_USER); 9424 if (!link) { 9425 err = -ENOMEM; 9426 goto unlock; 9427 } 9428 9429 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9430 link->dev = dev; 9431 link->flags = attr->link_create.flags; 9432 9433 err = bpf_link_prime(&link->link, &link_primer); 9434 if (err) { 9435 kfree(link); 9436 goto unlock; 9437 } 9438 9439 err = dev_xdp_attach_link(dev, NULL, link); 9440 rtnl_unlock(); 9441 9442 if (err) { 9443 link->dev = NULL; 9444 bpf_link_cleanup(&link_primer); 9445 goto out_put_dev; 9446 } 9447 9448 fd = bpf_link_settle(&link_primer); 9449 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9450 dev_put(dev); 9451 return fd; 9452 9453 unlock: 9454 rtnl_unlock(); 9455 9456 out_put_dev: 9457 dev_put(dev); 9458 return err; 9459 } 9460 9461 /** 9462 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9463 * @dev: device 9464 * @extack: netlink extended ack 9465 * @fd: new program fd or negative value to clear 9466 * @expected_fd: old program fd that userspace expects to replace or clear 9467 * @flags: xdp-related flags 9468 * 9469 * Set or clear a bpf program for a device 9470 */ 9471 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9472 int fd, int expected_fd, u32 flags) 9473 { 9474 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9475 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9476 int err; 9477 9478 ASSERT_RTNL(); 9479 9480 if (fd >= 0) { 9481 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9482 mode != XDP_MODE_SKB); 9483 if (IS_ERR(new_prog)) 9484 return PTR_ERR(new_prog); 9485 } 9486 9487 if (expected_fd >= 0) { 9488 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9489 mode != XDP_MODE_SKB); 9490 if (IS_ERR(old_prog)) { 9491 err = PTR_ERR(old_prog); 9492 old_prog = NULL; 9493 goto err_out; 9494 } 9495 } 9496 9497 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9498 9499 err_out: 9500 if (err && new_prog) 9501 bpf_prog_put(new_prog); 9502 if (old_prog) 9503 bpf_prog_put(old_prog); 9504 return err; 9505 } 9506 9507 /** 9508 * dev_new_index - allocate an ifindex 9509 * @net: the applicable net namespace 9510 * 9511 * Returns a suitable unique value for a new device interface 9512 * number. The caller must hold the rtnl semaphore or the 9513 * dev_base_lock to be sure it remains unique. 
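 *
 * For example (illustrative): if net->ifindex is currently 5 and index 6
 * is unused, 6 is returned and cached in net->ifindex; once the counter
 * would go non-positive the search wraps back to 1, skipping any indices
 * that are still taken.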
9514 */ 9515 static int dev_new_index(struct net *net) 9516 { 9517 int ifindex = net->ifindex; 9518 9519 for (;;) { 9520 if (++ifindex <= 0) 9521 ifindex = 1; 9522 if (!__dev_get_by_index(net, ifindex)) 9523 return net->ifindex = ifindex; 9524 } 9525 } 9526 9527 /* Delayed registration/unregisteration */ 9528 LIST_HEAD(net_todo_list); 9529 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9530 9531 static void net_set_todo(struct net_device *dev) 9532 { 9533 list_add_tail(&dev->todo_list, &net_todo_list); 9534 atomic_inc(&dev_net(dev)->dev_unreg_count); 9535 } 9536 9537 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9538 struct net_device *upper, netdev_features_t features) 9539 { 9540 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9541 netdev_features_t feature; 9542 int feature_bit; 9543 9544 for_each_netdev_feature(upper_disables, feature_bit) { 9545 feature = __NETIF_F_BIT(feature_bit); 9546 if (!(upper->wanted_features & feature) 9547 && (features & feature)) { 9548 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9549 &feature, upper->name); 9550 features &= ~feature; 9551 } 9552 } 9553 9554 return features; 9555 } 9556 9557 static void netdev_sync_lower_features(struct net_device *upper, 9558 struct net_device *lower, netdev_features_t features) 9559 { 9560 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9561 netdev_features_t feature; 9562 int feature_bit; 9563 9564 for_each_netdev_feature(upper_disables, feature_bit) { 9565 feature = __NETIF_F_BIT(feature_bit); 9566 if (!(features & feature) && (lower->features & feature)) { 9567 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9568 &feature, lower->name); 9569 lower->wanted_features &= ~feature; 9570 __netdev_update_features(lower); 9571 9572 if (unlikely(lower->features & feature)) 9573 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9574 &feature, lower->name); 9575 else 9576 netdev_features_change(lower); 9577 } 9578 } 9579 } 9580 9581 static netdev_features_t netdev_fix_features(struct net_device *dev, 9582 netdev_features_t features) 9583 { 9584 /* Fix illegal checksum combinations */ 9585 if ((features & NETIF_F_HW_CSUM) && 9586 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9587 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9588 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9589 } 9590 9591 /* TSO requires that SG is present as well. */ 9592 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9593 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9594 features &= ~NETIF_F_ALL_TSO; 9595 } 9596 9597 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9598 !(features & NETIF_F_IP_CSUM)) { 9599 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9600 features &= ~NETIF_F_TSO; 9601 features &= ~NETIF_F_TSO_ECN; 9602 } 9603 9604 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9605 !(features & NETIF_F_IPV6_CSUM)) { 9606 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9607 features &= ~NETIF_F_TSO6; 9608 } 9609 9610 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9611 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9612 features &= ~NETIF_F_TSO_MANGLEID; 9613 9614 /* TSO ECN requires that TSO is present as well. */ 9615 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9616 features &= ~NETIF_F_TSO_ECN; 9617 9618 /* Software GSO depends on SG. 
*/ 9619 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9620 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9621 features &= ~NETIF_F_GSO; 9622 } 9623 9624 /* GSO partial features require GSO partial be set */ 9625 if ((features & dev->gso_partial_features) && 9626 !(features & NETIF_F_GSO_PARTIAL)) { 9627 netdev_dbg(dev, 9628 "Dropping partially supported GSO features since no GSO partial.\n"); 9629 features &= ~dev->gso_partial_features; 9630 } 9631 9632 if (!(features & NETIF_F_RXCSUM)) { 9633 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9634 * successfully merged by hardware must also have the 9635 * checksum verified by hardware. If the user does not 9636 * want to enable RXCSUM, logically, we should disable GRO_HW. 9637 */ 9638 if (features & NETIF_F_GRO_HW) { 9639 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9640 features &= ~NETIF_F_GRO_HW; 9641 } 9642 } 9643 9644 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9645 if (features & NETIF_F_RXFCS) { 9646 if (features & NETIF_F_LRO) { 9647 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9648 features &= ~NETIF_F_LRO; 9649 } 9650 9651 if (features & NETIF_F_GRO_HW) { 9652 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9653 features &= ~NETIF_F_GRO_HW; 9654 } 9655 } 9656 9657 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 9658 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 9659 features &= ~NETIF_F_LRO; 9660 } 9661 9662 if (features & NETIF_F_HW_TLS_TX) { 9663 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9664 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9665 bool hw_csum = features & NETIF_F_HW_CSUM; 9666 9667 if (!ip_csum && !hw_csum) { 9668 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9669 features &= ~NETIF_F_HW_TLS_TX; 9670 } 9671 } 9672 9673 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9674 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9675 features &= ~NETIF_F_HW_TLS_RX; 9676 } 9677 9678 return features; 9679 } 9680 9681 int __netdev_update_features(struct net_device *dev) 9682 { 9683 struct net_device *upper, *lower; 9684 netdev_features_t features; 9685 struct list_head *iter; 9686 int err = -1; 9687 9688 ASSERT_RTNL(); 9689 9690 features = netdev_get_wanted_features(dev); 9691 9692 if (dev->netdev_ops->ndo_fix_features) 9693 features = dev->netdev_ops->ndo_fix_features(dev, features); 9694 9695 /* driver might be less strict about feature dependencies */ 9696 features = netdev_fix_features(dev, features); 9697 9698 /* some features can't be enabled if they're off on an upper device */ 9699 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9700 features = netdev_sync_upper_features(dev, upper, features); 9701 9702 if (dev->features == features) 9703 goto sync_lower; 9704 9705 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9706 &dev->features, &features); 9707 9708 if (dev->netdev_ops->ndo_set_features) 9709 err = dev->netdev_ops->ndo_set_features(dev, features); 9710 else 9711 err = 0; 9712 9713 if (unlikely(err < 0)) { 9714 netdev_err(dev, 9715 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9716 err, &features, &dev->features); 9717 /* return non-0 since some features might have changed and 9718 * it's better to fire a spurious notification than miss it 9719 */ 9720 return -1; 9721 } 9722 9723 sync_lower: 9724 /* some features must be disabled 
on lower devices when disabled 9725 * on an upper device (think: bonding master or bridge) 9726 */ 9727 netdev_for_each_lower_dev(dev, lower, iter) 9728 netdev_sync_lower_features(dev, lower, features); 9729 9730 if (!err) { 9731 netdev_features_t diff = features ^ dev->features; 9732 9733 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9734 /* udp_tunnel_{get,drop}_rx_info both need 9735 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9736 * device, or they won't do anything. 9737 * Thus we need to update dev->features 9738 * *before* calling udp_tunnel_get_rx_info, 9739 * but *after* calling udp_tunnel_drop_rx_info. 9740 */ 9741 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9742 dev->features = features; 9743 udp_tunnel_get_rx_info(dev); 9744 } else { 9745 udp_tunnel_drop_rx_info(dev); 9746 } 9747 } 9748 9749 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9750 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9751 dev->features = features; 9752 err |= vlan_get_rx_ctag_filter_info(dev); 9753 } else { 9754 vlan_drop_rx_ctag_filter_info(dev); 9755 } 9756 } 9757 9758 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 9759 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 9760 dev->features = features; 9761 err |= vlan_get_rx_stag_filter_info(dev); 9762 } else { 9763 vlan_drop_rx_stag_filter_info(dev); 9764 } 9765 } 9766 9767 dev->features = features; 9768 } 9769 9770 return err < 0 ? 0 : 1; 9771 } 9772 9773 /** 9774 * netdev_update_features - recalculate device features 9775 * @dev: the device to check 9776 * 9777 * Recalculate dev->features set and send notifications if it 9778 * has changed. Should be called after driver or hardware dependent 9779 * conditions might have changed that influence the features. 9780 */ 9781 void netdev_update_features(struct net_device *dev) 9782 { 9783 if (__netdev_update_features(dev)) 9784 netdev_features_change(dev); 9785 } 9786 EXPORT_SYMBOL(netdev_update_features); 9787 9788 /** 9789 * netdev_change_features - recalculate device features 9790 * @dev: the device to check 9791 * 9792 * Recalculate dev->features set and send notifications even 9793 * if they have not changed. Should be called instead of 9794 * netdev_update_features() if also dev->vlan_features might 9795 * have changed to allow the changes to be propagated to stacked 9796 * VLAN devices. 9797 */ 9798 void netdev_change_features(struct net_device *dev) 9799 { 9800 __netdev_update_features(dev); 9801 netdev_features_change(dev); 9802 } 9803 EXPORT_SYMBOL(netdev_change_features); 9804 9805 /** 9806 * netif_stacked_transfer_operstate - transfer operstate 9807 * @rootdev: the root or lower level device to transfer state from 9808 * @dev: the device to transfer operstate to 9809 * 9810 * Transfer operational state from root to device. This is normally 9811 * called when a stacking relationship exists between the root 9812 * device and the device(a leaf device). 
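 *
 * Illustrative use (a sketch, not taken from this file): a VLAN or
 * macvlan driver handling a NETDEV_CHANGE event on its real device can
 * propagate carrier, dormant and testing state to the stacked device:
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);
 *
 * real_dev and vlan_dev are hypothetical names for the lower and upper
 * devices of the stacking relationship.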
9813 */ 9814 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9815 struct net_device *dev) 9816 { 9817 if (rootdev->operstate == IF_OPER_DORMANT) 9818 netif_dormant_on(dev); 9819 else 9820 netif_dormant_off(dev); 9821 9822 if (rootdev->operstate == IF_OPER_TESTING) 9823 netif_testing_on(dev); 9824 else 9825 netif_testing_off(dev); 9826 9827 if (netif_carrier_ok(rootdev)) 9828 netif_carrier_on(dev); 9829 else 9830 netif_carrier_off(dev); 9831 } 9832 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9833 9834 static int netif_alloc_rx_queues(struct net_device *dev) 9835 { 9836 unsigned int i, count = dev->num_rx_queues; 9837 struct netdev_rx_queue *rx; 9838 size_t sz = count * sizeof(*rx); 9839 int err = 0; 9840 9841 BUG_ON(count < 1); 9842 9843 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9844 if (!rx) 9845 return -ENOMEM; 9846 9847 dev->_rx = rx; 9848 9849 for (i = 0; i < count; i++) { 9850 rx[i].dev = dev; 9851 9852 /* XDP RX-queue setup */ 9853 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 9854 if (err < 0) 9855 goto err_rxq_info; 9856 } 9857 return 0; 9858 9859 err_rxq_info: 9860 /* Rollback successful reg's and free other resources */ 9861 while (i--) 9862 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9863 kvfree(dev->_rx); 9864 dev->_rx = NULL; 9865 return err; 9866 } 9867 9868 static void netif_free_rx_queues(struct net_device *dev) 9869 { 9870 unsigned int i, count = dev->num_rx_queues; 9871 9872 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9873 if (!dev->_rx) 9874 return; 9875 9876 for (i = 0; i < count; i++) 9877 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9878 9879 kvfree(dev->_rx); 9880 } 9881 9882 static void netdev_init_one_queue(struct net_device *dev, 9883 struct netdev_queue *queue, void *_unused) 9884 { 9885 /* Initialize queue lock */ 9886 spin_lock_init(&queue->_xmit_lock); 9887 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 9888 queue->xmit_lock_owner = -1; 9889 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 9890 queue->dev = dev; 9891 #ifdef CONFIG_BQL 9892 dql_init(&queue->dql, HZ); 9893 #endif 9894 } 9895 9896 static void netif_free_tx_queues(struct net_device *dev) 9897 { 9898 kvfree(dev->_tx); 9899 } 9900 9901 static int netif_alloc_netdev_queues(struct net_device *dev) 9902 { 9903 unsigned int count = dev->num_tx_queues; 9904 struct netdev_queue *tx; 9905 size_t sz = count * sizeof(*tx); 9906 9907 if (count < 1 || count > 0xffff) 9908 return -EINVAL; 9909 9910 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9911 if (!tx) 9912 return -ENOMEM; 9913 9914 dev->_tx = tx; 9915 9916 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 9917 spin_lock_init(&dev->tx_global_lock); 9918 9919 return 0; 9920 } 9921 9922 void netif_tx_stop_all_queues(struct net_device *dev) 9923 { 9924 unsigned int i; 9925 9926 for (i = 0; i < dev->num_tx_queues; i++) { 9927 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 9928 9929 netif_tx_stop_queue(txq); 9930 } 9931 } 9932 EXPORT_SYMBOL(netif_tx_stop_all_queues); 9933 9934 /** 9935 * register_netdevice() - register a network device 9936 * @dev: device to register 9937 * 9938 * Take a prepared network device structure and make it externally accessible. 9939 * A %NETDEV_REGISTER message is sent to the netdev notifier chain. 9940 * Callers must hold the rtnl lock - you may want register_netdev() 9941 * instead of this. 
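 *
 * Illustrative use (a sketch, not taken from this file): a driver that
 * must register several devices under one RTNL critical section, or that
 * already holds the lock, calls this directly:
 *
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();
 *	if (err)
 *		free_netdev(dev);
 *
 * Drivers without such constraints normally use register_netdev(), which
 * takes the RTNL lock itself; error handling details vary per driver.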
9942 */ 9943 int register_netdevice(struct net_device *dev) 9944 { 9945 int ret; 9946 struct net *net = dev_net(dev); 9947 9948 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 9949 NETDEV_FEATURE_COUNT); 9950 BUG_ON(dev_boot_phase); 9951 ASSERT_RTNL(); 9952 9953 might_sleep(); 9954 9955 /* When net_device's are persistent, this will be fatal. */ 9956 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 9957 BUG_ON(!net); 9958 9959 ret = ethtool_check_ops(dev->ethtool_ops); 9960 if (ret) 9961 return ret; 9962 9963 spin_lock_init(&dev->addr_list_lock); 9964 netdev_set_addr_lockdep_class(dev); 9965 9966 ret = dev_get_valid_name(net, dev, dev->name); 9967 if (ret < 0) 9968 goto out; 9969 9970 ret = -ENOMEM; 9971 dev->name_node = netdev_name_node_head_alloc(dev); 9972 if (!dev->name_node) 9973 goto out; 9974 9975 /* Init, if this function is available */ 9976 if (dev->netdev_ops->ndo_init) { 9977 ret = dev->netdev_ops->ndo_init(dev); 9978 if (ret) { 9979 if (ret > 0) 9980 ret = -EIO; 9981 goto err_free_name; 9982 } 9983 } 9984 9985 if (((dev->hw_features | dev->features) & 9986 NETIF_F_HW_VLAN_CTAG_FILTER) && 9987 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 9988 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 9989 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 9990 ret = -EINVAL; 9991 goto err_uninit; 9992 } 9993 9994 ret = -EBUSY; 9995 if (!dev->ifindex) 9996 dev->ifindex = dev_new_index(net); 9997 else if (__dev_get_by_index(net, dev->ifindex)) 9998 goto err_uninit; 9999 10000 /* Transfer changeable features to wanted_features and enable 10001 * software offloads (GSO and GRO). 10002 */ 10003 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10004 dev->features |= NETIF_F_SOFT_FEATURES; 10005 10006 if (dev->udp_tunnel_nic_info) { 10007 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10008 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10009 } 10010 10011 dev->wanted_features = dev->features & dev->hw_features; 10012 10013 if (!(dev->flags & IFF_LOOPBACK)) 10014 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10015 10016 /* If IPv4 TCP segmentation offload is supported we should also 10017 * allow the device to enable segmenting the frame with the option 10018 * of ignoring a static IP ID value. This doesn't enable the 10019 * feature itself but allows the user to enable it later. 10020 */ 10021 if (dev->hw_features & NETIF_F_TSO) 10022 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10023 if (dev->vlan_features & NETIF_F_TSO) 10024 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10025 if (dev->mpls_features & NETIF_F_TSO) 10026 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10027 if (dev->hw_enc_features & NETIF_F_TSO) 10028 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10029 10030 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10031 */ 10032 dev->vlan_features |= NETIF_F_HIGHDMA; 10033 10034 /* Make NETIF_F_SG inheritable to tunnel devices. 10035 */ 10036 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10037 10038 /* Make NETIF_F_SG inheritable to MPLS. 10039 */ 10040 dev->mpls_features |= NETIF_F_SG; 10041 10042 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 10043 ret = notifier_to_errno(ret); 10044 if (ret) 10045 goto err_uninit; 10046 10047 ret = netdev_register_kobject(dev); 10048 write_lock(&dev_base_lock); 10049 dev->reg_state = ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED; 10050 write_unlock(&dev_base_lock); 10051 if (ret) 10052 goto err_uninit_notify; 10053 10054 __netdev_update_features(dev); 10055 10056 /* 10057 * Default initial state at registration is that the 10058 * device is present. 10059 */ 10060 10061 set_bit(__LINK_STATE_PRESENT, &dev->state); 10062 10063 linkwatch_init_dev(dev); 10064 10065 dev_init_scheduler(dev); 10066 10067 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL); 10068 list_netdevice(dev); 10069 10070 add_device_randomness(dev->dev_addr, dev->addr_len); 10071 10072 /* If the device has a permanent device address, the driver should 10073 * set dev_addr and also addr_assign_type should be set to 10074 * NET_ADDR_PERM (default value). 10075 */ 10076 if (dev->addr_assign_type == NET_ADDR_PERM) 10077 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 10078 10079 /* Notify protocols that a new device appeared. */ 10080 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 10081 ret = notifier_to_errno(ret); 10082 if (ret) { 10083 /* Expect explicit free_netdev() on failure */ 10084 dev->needs_free_netdev = false; 10085 unregister_netdevice_queue(dev, NULL); 10086 goto out; 10087 } 10088 /* 10089 * Prevent userspace races by waiting until the network 10090 * device is fully set up before sending notifications. 10091 */ 10092 if (!dev->rtnl_link_ops || 10093 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10094 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 10095 10096 out: 10097 return ret; 10098 10099 err_uninit_notify: 10100 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 10101 err_uninit: 10102 if (dev->netdev_ops->ndo_uninit) 10103 dev->netdev_ops->ndo_uninit(dev); 10104 if (dev->priv_destructor) 10105 dev->priv_destructor(dev); 10106 err_free_name: 10107 netdev_name_node_free(dev->name_node); 10108 goto out; 10109 } 10110 EXPORT_SYMBOL(register_netdevice); 10111 10112 /** 10113 * init_dummy_netdev - init a dummy network device for NAPI 10114 * @dev: device to init 10115 * 10116 * This takes a network device structure and initializes the minimum 10117 * number of fields so it can be used to schedule NAPI polls without 10118 * registering a full blown interface. This is to be used by drivers 10119 * that need to tie several hardware interfaces to a single NAPI 10120 * poll scheduler due to HW limitations. 10121 */ 10122 int init_dummy_netdev(struct net_device *dev) 10123 { 10124 /* Clear everything. Note we don't initialize spinlocks 10125 * as they aren't supposed to be taken by any of the 10126 * NAPI code and this dummy netdev is supposed to be 10127 * only ever used for NAPI polls. 10128 */ 10129 memset(dev, 0, sizeof(struct net_device)); 10130 10131 /* make sure we BUG if trying to hit standard 10132 * register/unregister code path 10133 */ 10134 dev->reg_state = NETREG_DUMMY; 10135 10136 /* NAPI wants this */ 10137 INIT_LIST_HEAD(&dev->napi_list); 10138 10139 /* a dummy interface is started by default */ 10140 set_bit(__LINK_STATE_PRESENT, &dev->state); 10141 set_bit(__LINK_STATE_START, &dev->state); 10142 10143 /* napi_busy_loop stats accounting wants this */ 10144 dev_net_set(dev, &init_net); 10145 10146 /* Note : We don't allocate pcpu_refcnt for dummy devices, 10147 * because users of this 'device' don't need to change 10148 * its refcount.
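	 *
	 * Illustrative pattern (hypothetical driver, not from this file):
	 * a device with a single bus function but several MACs can keep a
	 * private dummy netdev purely to host its NAPI contexts, e.g.
	 *
	 *	init_dummy_netdev(&priv->napi_dev);
	 *	netif_napi_add(&priv->napi_dev, &priv->napi, foo_poll);
	 *
	 * where priv, napi_dev and foo_poll are the driver's own.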
10149 */ 10150 10151 return 0; 10152 } 10153 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10154 10155 10156 /** 10157 * register_netdev - register a network device 10158 * @dev: device to register 10159 * 10160 * Take a completed network device structure and add it to the kernel 10161 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10162 * chain. 0 is returned on success. A negative errno code is returned 10163 * on a failure to set up the device, or if the name is a duplicate. 10164 * 10165 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10166 * and expands the device name if you passed a format string to 10167 * alloc_netdev. 10168 */ 10169 int register_netdev(struct net_device *dev) 10170 { 10171 int err; 10172 10173 if (rtnl_lock_killable()) 10174 return -EINTR; 10175 err = register_netdevice(dev); 10176 rtnl_unlock(); 10177 return err; 10178 } 10179 EXPORT_SYMBOL(register_netdev); 10180 10181 int netdev_refcnt_read(const struct net_device *dev) 10182 { 10183 #ifdef CONFIG_PCPU_DEV_REFCNT 10184 int i, refcnt = 0; 10185 10186 for_each_possible_cpu(i) 10187 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10188 return refcnt; 10189 #else 10190 return refcount_read(&dev->dev_refcnt); 10191 #endif 10192 } 10193 EXPORT_SYMBOL(netdev_refcnt_read); 10194 10195 int netdev_unregister_timeout_secs __read_mostly = 10; 10196 10197 #define WAIT_REFS_MIN_MSECS 1 10198 #define WAIT_REFS_MAX_MSECS 250 10199 /** 10200 * netdev_wait_allrefs_any - wait until all references are gone. 10201 * @list: list of net_devices to wait on 10202 * 10203 * This is called when unregistering network devices. 10204 * 10205 * Any protocol or device that holds a reference should register 10206 * for netdevice notification, and cleanup and put back the 10207 * reference if they receive an UNREGISTER event. 10208 * We can get stuck here if buggy protocols don't correctly 10209 * call dev_put. 10210 */ 10211 static struct net_device *netdev_wait_allrefs_any(struct list_head *list) 10212 { 10213 unsigned long rebroadcast_time, warning_time; 10214 struct net_device *dev; 10215 int wait = 0; 10216 10217 rebroadcast_time = warning_time = jiffies; 10218 10219 list_for_each_entry(dev, list, todo_list) 10220 if (netdev_refcnt_read(dev) == 1) 10221 return dev; 10222 10223 while (true) { 10224 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10225 rtnl_lock(); 10226 10227 /* Rebroadcast unregister notification */ 10228 list_for_each_entry(dev, list, todo_list) 10229 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10230 10231 __rtnl_unlock(); 10232 rcu_barrier(); 10233 rtnl_lock(); 10234 10235 list_for_each_entry(dev, list, todo_list) 10236 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10237 &dev->state)) { 10238 /* We must not have linkwatch events 10239 * pending on unregister. If this 10240 * happens, we simply run the queue 10241 * unscheduled, resulting in a noop 10242 * for this device. 
10243 */ 10244 linkwatch_run_queue(); 10245 break; 10246 } 10247 10248 __rtnl_unlock(); 10249 10250 rebroadcast_time = jiffies; 10251 } 10252 10253 if (!wait) { 10254 rcu_barrier(); 10255 wait = WAIT_REFS_MIN_MSECS; 10256 } else { 10257 msleep(wait); 10258 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10259 } 10260 10261 list_for_each_entry(dev, list, todo_list) 10262 if (netdev_refcnt_read(dev) == 1) 10263 return dev; 10264 10265 if (time_after(jiffies, warning_time + 10266 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { 10267 list_for_each_entry(dev, list, todo_list) { 10268 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10269 dev->name, netdev_refcnt_read(dev)); 10270 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 10271 } 10272 10273 warning_time = jiffies; 10274 } 10275 } 10276 } 10277 10278 /* The sequence is: 10279 * 10280 * rtnl_lock(); 10281 * ... 10282 * register_netdevice(x1); 10283 * register_netdevice(x2); 10284 * ... 10285 * unregister_netdevice(y1); 10286 * unregister_netdevice(y2); 10287 * ... 10288 * rtnl_unlock(); 10289 * free_netdev(y1); 10290 * free_netdev(y2); 10291 * 10292 * We are invoked by rtnl_unlock(). 10293 * This allows us to deal with problems: 10294 * 1) We can delete sysfs objects which invoke hotplug 10295 * without deadlocking with linkwatch via keventd. 10296 * 2) Since we run with the RTNL semaphore not held, we can sleep 10297 * safely in order to wait for the netdev refcnt to drop to zero. 10298 * 10299 * We must not return until all unregister events added during 10300 * the interval the lock was held have been completed. 10301 */ 10302 void netdev_run_todo(void) 10303 { 10304 struct net_device *dev, *tmp; 10305 struct list_head list; 10306 #ifdef CONFIG_LOCKDEP 10307 struct list_head unlink_list; 10308 10309 list_replace_init(&net_unlink_list, &unlink_list); 10310 10311 while (!list_empty(&unlink_list)) { 10312 struct net_device *dev = list_first_entry(&unlink_list, 10313 struct net_device, 10314 unlink_list); 10315 list_del_init(&dev->unlink_list); 10316 dev->nested_level = dev->lower_level - 1; 10317 } 10318 #endif 10319 10320 /* Snapshot list, allow later requests */ 10321 list_replace_init(&net_todo_list, &list); 10322 10323 __rtnl_unlock(); 10324 10325 /* Wait for rcu callbacks to finish before next phase */ 10326 if (!list_empty(&list)) 10327 rcu_barrier(); 10328 10329 list_for_each_entry_safe(dev, tmp, &list, todo_list) { 10330 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10331 netdev_WARN(dev, "run_todo but not unregistering\n"); 10332 list_del(&dev->todo_list); 10333 continue; 10334 } 10335 10336 write_lock(&dev_base_lock); 10337 dev->reg_state = NETREG_UNREGISTERED; 10338 write_unlock(&dev_base_lock); 10339 linkwatch_forget_dev(dev); 10340 } 10341 10342 while (!list_empty(&list)) { 10343 dev = netdev_wait_allrefs_any(&list); 10344 list_del(&dev->todo_list); 10345 10346 /* paranoia */ 10347 BUG_ON(netdev_refcnt_read(dev) != 1); 10348 BUG_ON(!list_empty(&dev->ptype_all)); 10349 BUG_ON(!list_empty(&dev->ptype_specific)); 10350 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10351 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10352 10353 if (dev->priv_destructor) 10354 dev->priv_destructor(dev); 10355 if (dev->needs_free_netdev) 10356 free_netdev(dev); 10357 10358 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count)) 10359 wake_up(&netdev_unregistering_wq); 10360 10361 /* Free network device */ 10362 kobject_put(&dev->dev.kobj); 10363 } 10364 } 10365 10366 /* Convert net_device_stats to 
rtnl_link_stats64. rtnl_link_stats64 has 10367 * all the same fields in the same order as net_device_stats, with only 10368 * the type differing, but rtnl_link_stats64 may have additional fields 10369 * at the end for newer counters. 10370 */ 10371 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10372 const struct net_device_stats *netdev_stats) 10373 { 10374 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t); 10375 const atomic_long_t *src = (atomic_long_t *)netdev_stats; 10376 u64 *dst = (u64 *)stats64; 10377 10378 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10379 for (i = 0; i < n; i++) 10380 dst[i] = (unsigned long)atomic_long_read(&src[i]); 10381 /* zero out counters that only exist in rtnl_link_stats64 */ 10382 memset((char *)stats64 + n * sizeof(u64), 0, 10383 sizeof(*stats64) - n * sizeof(u64)); 10384 } 10385 EXPORT_SYMBOL(netdev_stats_to_stats64); 10386 10387 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev) 10388 { 10389 struct net_device_core_stats __percpu *p; 10390 10391 p = alloc_percpu_gfp(struct net_device_core_stats, 10392 GFP_ATOMIC | __GFP_NOWARN); 10393 10394 if (p && cmpxchg(&dev->core_stats, NULL, p)) 10395 free_percpu(p); 10396 10397 /* This READ_ONCE() pairs with the cmpxchg() above */ 10398 return READ_ONCE(dev->core_stats); 10399 } 10400 EXPORT_SYMBOL(netdev_core_stats_alloc); 10401 10402 /** 10403 * dev_get_stats - get network device statistics 10404 * @dev: device to get statistics from 10405 * @storage: place to store stats 10406 * 10407 * Get network statistics from device. Return @storage. 10408 * The device driver may provide its own method by setting 10409 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10410 * otherwise the internal statistics structure is used. 10411 */ 10412 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10413 struct rtnl_link_stats64 *storage) 10414 { 10415 const struct net_device_ops *ops = dev->netdev_ops; 10416 const struct net_device_core_stats __percpu *p; 10417 10418 if (ops->ndo_get_stats64) { 10419 memset(storage, 0, sizeof(*storage)); 10420 ops->ndo_get_stats64(dev, storage); 10421 } else if (ops->ndo_get_stats) { 10422 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10423 } else { 10424 netdev_stats_to_stats64(storage, &dev->stats); 10425 } 10426 10427 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ 10428 p = READ_ONCE(dev->core_stats); 10429 if (p) { 10430 const struct net_device_core_stats *core_stats; 10431 int i; 10432 10433 for_each_possible_cpu(i) { 10434 core_stats = per_cpu_ptr(p, i); 10435 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped); 10436 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped); 10437 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler); 10438 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped); 10439 } 10440 } 10441 return storage; 10442 } 10443 EXPORT_SYMBOL(dev_get_stats); 10444 10445 /** 10446 * dev_fetch_sw_netstats - get per-cpu network device statistics 10447 * @s: place to store stats 10448 * @netstats: per-cpu network stats to read from 10449 * 10450 * Read per-cpu network statistics and populate the related fields in @s. 
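 *
 * Illustrative sketch (hypothetical driver): a driver that keeps its
 * struct pcpu_sw_netstats __percpu counters in dev->tstats can build
 * its .ndo_get_stats64 on top of this helper:
 *
 *	static void foo_get_stats64(struct net_device *dev,
 *				    struct rtnl_link_stats64 *s)
 *	{
 *		netdev_stats_to_stats64(s, &dev->stats);
 *		dev_fetch_sw_netstats(s, dev->tstats);
 *	}
 *
 * which is exactly what the generic dev_get_tstats64() below does.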
10451 */ 10452 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10453 const struct pcpu_sw_netstats __percpu *netstats) 10454 { 10455 int cpu; 10456 10457 for_each_possible_cpu(cpu) { 10458 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 10459 const struct pcpu_sw_netstats *stats; 10460 unsigned int start; 10461 10462 stats = per_cpu_ptr(netstats, cpu); 10463 do { 10464 start = u64_stats_fetch_begin(&stats->syncp); 10465 rx_packets = u64_stats_read(&stats->rx_packets); 10466 rx_bytes = u64_stats_read(&stats->rx_bytes); 10467 tx_packets = u64_stats_read(&stats->tx_packets); 10468 tx_bytes = u64_stats_read(&stats->tx_bytes); 10469 } while (u64_stats_fetch_retry(&stats->syncp, start)); 10470 10471 s->rx_packets += rx_packets; 10472 s->rx_bytes += rx_bytes; 10473 s->tx_packets += tx_packets; 10474 s->tx_bytes += tx_bytes; 10475 } 10476 } 10477 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10478 10479 /** 10480 * dev_get_tstats64 - ndo_get_stats64 implementation 10481 * @dev: device to get statistics from 10482 * @s: place to store stats 10483 * 10484 * Populate @s from dev->stats and dev->tstats. Can be used as 10485 * ndo_get_stats64() callback. 10486 */ 10487 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10488 { 10489 netdev_stats_to_stats64(s, &dev->stats); 10490 dev_fetch_sw_netstats(s, dev->tstats); 10491 } 10492 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10493 10494 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10495 { 10496 struct netdev_queue *queue = dev_ingress_queue(dev); 10497 10498 #ifdef CONFIG_NET_CLS_ACT 10499 if (queue) 10500 return queue; 10501 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10502 if (!queue) 10503 return NULL; 10504 netdev_init_one_queue(dev, queue, NULL); 10505 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10506 queue->qdisc_sleeping = &noop_qdisc; 10507 rcu_assign_pointer(dev->ingress_queue, queue); 10508 #endif 10509 return queue; 10510 } 10511 10512 static const struct ethtool_ops default_ethtool_ops; 10513 10514 void netdev_set_default_ethtool_ops(struct net_device *dev, 10515 const struct ethtool_ops *ops) 10516 { 10517 if (dev->ethtool_ops == &default_ethtool_ops) 10518 dev->ethtool_ops = ops; 10519 } 10520 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10521 10522 /** 10523 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default 10524 * @dev: netdev to enable the IRQ coalescing on 10525 * 10526 * Sets a conservative default for SW IRQ coalescing. Users can use 10527 * sysfs attributes to override the default values. 10528 */ 10529 void netdev_sw_irq_coalesce_default_on(struct net_device *dev) 10530 { 10531 WARN_ON(dev->reg_state == NETREG_REGISTERED); 10532 10533 dev->gro_flush_timeout = 20000; 10534 dev->napi_defer_hard_irqs = 1; 10535 } 10536 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); 10537 10538 void netdev_freemem(struct net_device *dev) 10539 { 10540 char *addr = (char *)dev - dev->padded; 10541 10542 kvfree(addr); 10543 } 10544 10545 /** 10546 * alloc_netdev_mqs - allocate network device 10547 * @sizeof_priv: size of private data to allocate space for 10548 * @name: device name format string 10549 * @name_assign_type: origin of device name 10550 * @setup: callback to initialize device 10551 * @txqs: the number of TX subqueues to allocate 10552 * @rxqs: the number of RX subqueues to allocate 10553 * 10554 * Allocates a struct net_device with private data area for driver use 10555 * and performs basic initialization. 
Also allocates subqueue structs 10556 * for each queue on the device. 10557 */ 10558 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10559 unsigned char name_assign_type, 10560 void (*setup)(struct net_device *), 10561 unsigned int txqs, unsigned int rxqs) 10562 { 10563 struct net_device *dev; 10564 unsigned int alloc_size; 10565 struct net_device *p; 10566 10567 BUG_ON(strlen(name) >= sizeof(dev->name)); 10568 10569 if (txqs < 1) { 10570 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10571 return NULL; 10572 } 10573 10574 if (rxqs < 1) { 10575 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10576 return NULL; 10577 } 10578 10579 alloc_size = sizeof(struct net_device); 10580 if (sizeof_priv) { 10581 /* ensure 32-byte alignment of private area */ 10582 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10583 alloc_size += sizeof_priv; 10584 } 10585 /* ensure 32-byte alignment of whole construct */ 10586 alloc_size += NETDEV_ALIGN - 1; 10587 10588 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10589 if (!p) 10590 return NULL; 10591 10592 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10593 dev->padded = (char *)dev - (char *)p; 10594 10595 ref_tracker_dir_init(&dev->refcnt_tracker, 128); 10596 #ifdef CONFIG_PCPU_DEV_REFCNT 10597 dev->pcpu_refcnt = alloc_percpu(int); 10598 if (!dev->pcpu_refcnt) 10599 goto free_dev; 10600 __dev_hold(dev); 10601 #else 10602 refcount_set(&dev->dev_refcnt, 1); 10603 #endif 10604 10605 if (dev_addr_init(dev)) 10606 goto free_pcpu; 10607 10608 dev_mc_init(dev); 10609 dev_uc_init(dev); 10610 10611 dev_net_set(dev, &init_net); 10612 10613 dev->gso_max_size = GSO_LEGACY_MAX_SIZE; 10614 dev->gso_max_segs = GSO_MAX_SEGS; 10615 dev->gro_max_size = GRO_LEGACY_MAX_SIZE; 10616 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE; 10617 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE; 10618 dev->tso_max_size = TSO_LEGACY_MAX_SIZE; 10619 dev->tso_max_segs = TSO_MAX_SEGS; 10620 dev->upper_level = 1; 10621 dev->lower_level = 1; 10622 #ifdef CONFIG_LOCKDEP 10623 dev->nested_level = 0; 10624 INIT_LIST_HEAD(&dev->unlink_list); 10625 #endif 10626 10627 INIT_LIST_HEAD(&dev->napi_list); 10628 INIT_LIST_HEAD(&dev->unreg_list); 10629 INIT_LIST_HEAD(&dev->close_list); 10630 INIT_LIST_HEAD(&dev->link_watch_list); 10631 INIT_LIST_HEAD(&dev->adj_list.upper); 10632 INIT_LIST_HEAD(&dev->adj_list.lower); 10633 INIT_LIST_HEAD(&dev->ptype_all); 10634 INIT_LIST_HEAD(&dev->ptype_specific); 10635 INIT_LIST_HEAD(&dev->net_notifier_list); 10636 #ifdef CONFIG_NET_SCHED 10637 hash_init(dev->qdisc_hash); 10638 #endif 10639 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10640 setup(dev); 10641 10642 if (!dev->tx_queue_len) { 10643 dev->priv_flags |= IFF_NO_QUEUE; 10644 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10645 } 10646 10647 dev->num_tx_queues = txqs; 10648 dev->real_num_tx_queues = txqs; 10649 if (netif_alloc_netdev_queues(dev)) 10650 goto free_all; 10651 10652 dev->num_rx_queues = rxqs; 10653 dev->real_num_rx_queues = rxqs; 10654 if (netif_alloc_rx_queues(dev)) 10655 goto free_all; 10656 10657 strcpy(dev->name, name); 10658 dev->name_assign_type = name_assign_type; 10659 dev->group = INIT_NETDEV_GROUP; 10660 if (!dev->ethtool_ops) 10661 dev->ethtool_ops = &default_ethtool_ops; 10662 10663 nf_hook_netdev_init(dev); 10664 10665 return dev; 10666 10667 free_all: 10668 free_netdev(dev); 10669 return NULL; 10670 10671 free_pcpu: 10672 #ifdef CONFIG_PCPU_DEV_REFCNT 10673 free_percpu(dev->pcpu_refcnt); 10674 free_dev: 
10675 #endif 10676 netdev_freemem(dev); 10677 return NULL; 10678 } 10679 EXPORT_SYMBOL(alloc_netdev_mqs); 10680 10681 /** 10682 * free_netdev - free network device 10683 * @dev: device 10684 * 10685 * This function does the last stage of destroying an allocated device 10686 * interface. The reference to the device object is released. If this 10687 * is the last reference then it will be freed. Must be called in process 10688 * context. 10689 */ 10690 void free_netdev(struct net_device *dev) 10691 { 10692 struct napi_struct *p, *n; 10693 10694 might_sleep(); 10695 10696 /* When called immediately after register_netdevice() failed, the unwind 10697 * handling may still be dismantling the device. Handle that case by 10698 * deferring the free. 10699 */ 10700 if (dev->reg_state == NETREG_UNREGISTERING) { 10701 ASSERT_RTNL(); 10702 dev->needs_free_netdev = true; 10703 return; 10704 } 10705 10706 netif_free_tx_queues(dev); 10707 netif_free_rx_queues(dev); 10708 10709 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 10710 10711 /* Flush device addresses */ 10712 dev_addr_flush(dev); 10713 10714 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 10715 netif_napi_del(p); 10716 10717 ref_tracker_dir_exit(&dev->refcnt_tracker); 10718 #ifdef CONFIG_PCPU_DEV_REFCNT 10719 free_percpu(dev->pcpu_refcnt); 10720 dev->pcpu_refcnt = NULL; 10721 #endif 10722 free_percpu(dev->core_stats); 10723 dev->core_stats = NULL; 10724 free_percpu(dev->xdp_bulkq); 10725 dev->xdp_bulkq = NULL; 10726 10727 /* Compatibility with error handling in drivers */ 10728 if (dev->reg_state == NETREG_UNINITIALIZED) { 10729 netdev_freemem(dev); 10730 return; 10731 } 10732 10733 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 10734 dev->reg_state = NETREG_RELEASED; 10735 10736 /* will free via device release */ 10737 put_device(&dev->dev); 10738 } 10739 EXPORT_SYMBOL(free_netdev); 10740 10741 /** 10742 * synchronize_net - Synchronize with packet receive processing 10743 * 10744 * Wait for packets currently being received to be done. 10745 * Does not block later packets from starting. 10746 */ 10747 void synchronize_net(void) 10748 { 10749 might_sleep(); 10750 if (rtnl_is_locked()) 10751 synchronize_rcu_expedited(); 10752 else 10753 synchronize_rcu(); 10754 } 10755 EXPORT_SYMBOL(synchronize_net); 10756 10757 /** 10758 * unregister_netdevice_queue - remove device from the kernel 10759 * @dev: device 10760 * @head: list 10761 * 10762 * This function shuts down a device interface and removes it 10763 * from the kernel tables. 10764 * If @head is not NULL, the device is queued to be unregistered later. 10765 * 10766 * Callers must hold the rtnl semaphore. You may want 10767 * unregister_netdev() instead of this.
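 *
 * Illustrative sketch: batching several teardowns under a single
 * rtnl_lock() lets unregister_netdevice_many() share the expensive
 * synchronize_net() and notifier work, e.g.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(foo_dev1, &kill_list);
 *	unregister_netdevice_queue(foo_dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 *
 * (foo_dev1 and foo_dev2 are hypothetical; default_device_exit_batch()
 * at the bottom of this file follows the same pattern.)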
10768 */ 10769 10770 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 10771 { 10772 ASSERT_RTNL(); 10773 10774 if (head) { 10775 list_move_tail(&dev->unreg_list, head); 10776 } else { 10777 LIST_HEAD(single); 10778 10779 list_add(&dev->unreg_list, &single); 10780 unregister_netdevice_many(&single); 10781 } 10782 } 10783 EXPORT_SYMBOL(unregister_netdevice_queue); 10784 10785 void unregister_netdevice_many_notify(struct list_head *head, 10786 u32 portid, const struct nlmsghdr *nlh) 10787 { 10788 struct net_device *dev, *tmp; 10789 LIST_HEAD(close_head); 10790 10791 BUG_ON(dev_boot_phase); 10792 ASSERT_RTNL(); 10793 10794 if (list_empty(head)) 10795 return; 10796 10797 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 10798 /* Some devices call without registering 10799 * for initialization unwind. Remove those 10800 * devices and proceed with the remaining. 10801 */ 10802 if (dev->reg_state == NETREG_UNINITIALIZED) { 10803 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 10804 dev->name, dev); 10805 10806 WARN_ON(1); 10807 list_del(&dev->unreg_list); 10808 continue; 10809 } 10810 dev->dismantle = true; 10811 BUG_ON(dev->reg_state != NETREG_REGISTERED); 10812 } 10813 10814 /* If device is running, close it first. */ 10815 list_for_each_entry(dev, head, unreg_list) 10816 list_add_tail(&dev->close_list, &close_head); 10817 dev_close_many(&close_head, true); 10818 10819 list_for_each_entry(dev, head, unreg_list) { 10820 /* And unlink it from device chain. */ 10821 write_lock(&dev_base_lock); 10822 unlist_netdevice(dev, false); 10823 dev->reg_state = NETREG_UNREGISTERING; 10824 write_unlock(&dev_base_lock); 10825 } 10826 flush_all_backlogs(); 10827 10828 synchronize_net(); 10829 10830 list_for_each_entry(dev, head, unreg_list) { 10831 struct sk_buff *skb = NULL; 10832 10833 /* Shutdown queueing discipline. */ 10834 dev_shutdown(dev); 10835 10836 dev_xdp_uninstall(dev); 10837 bpf_dev_bound_netdev_unregister(dev); 10838 10839 netdev_offload_xstats_disable_all(dev); 10840 10841 /* Notify protocols, that we are about to destroy 10842 * this device. They should clean all the things. 10843 */ 10844 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10845 10846 if (!dev->rtnl_link_ops || 10847 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10848 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 10849 GFP_KERNEL, NULL, 0, 10850 portid, nlh); 10851 10852 /* 10853 * Flush the unicast and multicast chains 10854 */ 10855 dev_uc_flush(dev); 10856 dev_mc_flush(dev); 10857 10858 netdev_name_node_alt_flush(dev); 10859 netdev_name_node_free(dev->name_node); 10860 10861 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); 10862 10863 if (dev->netdev_ops->ndo_uninit) 10864 dev->netdev_ops->ndo_uninit(dev); 10865 10866 if (skb) 10867 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); 10868 10869 /* Notifier chain MUST detach us all upper devices. 
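	 * A stacked driver (bonding, vlan, team, ...) is expected to release
	 * its link to this device from its NETDEV_UNREGISTER handler;
	 * otherwise the WARN_ONs below will fire.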
*/ 10870 WARN_ON(netdev_has_any_upper_dev(dev)); 10871 WARN_ON(netdev_has_any_lower_dev(dev)); 10872 10873 /* Remove entries from kobject tree */ 10874 netdev_unregister_kobject(dev); 10875 #ifdef CONFIG_XPS 10876 /* Remove XPS queueing entries */ 10877 netif_reset_xps_queues_gt(dev, 0); 10878 #endif 10879 } 10880 10881 synchronize_net(); 10882 10883 list_for_each_entry(dev, head, unreg_list) { 10884 netdev_put(dev, &dev->dev_registered_tracker); 10885 net_set_todo(dev); 10886 } 10887 10888 list_del(head); 10889 } 10890 10891 /** 10892 * unregister_netdevice_many - unregister many devices 10893 * @head: list of devices 10894 * 10895 * Note: As most callers use a stack allocated list_head, 10896 * we force a list_del() to make sure the stack won't be corrupted later. 10897 */ 10898 void unregister_netdevice_many(struct list_head *head) 10899 { 10900 unregister_netdevice_many_notify(head, 0, NULL); 10901 } 10902 EXPORT_SYMBOL(unregister_netdevice_many); 10903 10904 /** 10905 * unregister_netdev - remove device from the kernel 10906 * @dev: device 10907 * 10908 * This function shuts down a device interface and removes it 10909 * from the kernel tables. 10910 * 10911 * This is just a wrapper for unregister_netdevice that takes 10912 * the rtnl semaphore. In general you want to use this and not 10913 * unregister_netdevice. 10914 */ 10915 void unregister_netdev(struct net_device *dev) 10916 { 10917 rtnl_lock(); 10918 unregister_netdevice(dev); 10919 rtnl_unlock(); 10920 } 10921 EXPORT_SYMBOL(unregister_netdev); 10922 10923 /** 10924 * __dev_change_net_namespace - move device to a different network namespace 10925 * @dev: device 10926 * @net: network namespace 10927 * @pat: If not NULL, name pattern to try if the current device name 10928 * is already taken in the destination network namespace. 10929 * @new_ifindex: If not zero, specifies device index in the target 10930 * namespace. 10931 * 10932 * This function shuts down a device interface and moves it 10933 * to a new network namespace. On success 0 is returned, on 10934 * a failure a negative errno code is returned. 10935 * 10936 * Callers must hold the rtnl semaphore. 10937 */ 10938 10939 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 10940 const char *pat, int new_ifindex) 10941 { 10942 struct net *net_old = dev_net(dev); 10943 int err, new_nsid; 10944 10945 ASSERT_RTNL(); 10946 10947 /* Don't allow namespace local devices to be moved. */ 10948 err = -EINVAL; 10949 if (dev->features & NETIF_F_NETNS_LOCAL) 10950 goto out; 10951 10952 /* Ensure the device has been registered */ 10953 if (dev->reg_state != NETREG_REGISTERED) 10954 goto out; 10955 10956 /* Get out if there is nothing to do */ 10957 err = 0; 10958 if (net_eq(net_old, net)) 10959 goto out; 10960 10961 /* Pick the destination device name, and ensure 10962 * we can use it in the destination network namespace. 10963 */ 10964 err = -EEXIST; 10965 if (netdev_name_in_use(net, dev->name)) { 10966 /* We get here if we can't use the current device name */ 10967 if (!pat) 10968 goto out; 10969 err = dev_get_valid_name(net, dev, pat); 10970 if (err < 0) 10971 goto out; 10972 } 10973 10974 /* Check that new_ifindex isn't used yet. */ 10975 err = -EBUSY; 10976 if (new_ifindex && __dev_get_by_index(net, new_ifindex)) 10977 goto out; 10978 10979 /* 10980 * And now a mini version of register_netdevice() and unregister_netdevice(). 10981 */ 10982 10983 /* If the device is running, close it first.
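	 * (dev_close() is a no-op for a device that is already down.)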
*/ 10984 dev_close(dev); 10985 10986 /* And unlink it from device chain */ 10987 unlist_netdevice(dev, true); 10988 10989 synchronize_net(); 10990 10991 /* Shutdown queueing discipline. */ 10992 dev_shutdown(dev); 10993 10994 /* Notify protocols, that we are about to destroy 10995 * this device. They should clean all the things. 10996 * 10997 * Note that dev->reg_state stays at NETREG_REGISTERED. 10998 * This is wanted because this way 8021q and macvlan know 10999 * the device is just moving and can keep their slaves up. 11000 */ 11001 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11002 rcu_barrier(); 11003 11004 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11005 /* If there is an ifindex conflict assign a new one */ 11006 if (!new_ifindex) { 11007 if (__dev_get_by_index(net, dev->ifindex)) 11008 new_ifindex = dev_new_index(net); 11009 else 11010 new_ifindex = dev->ifindex; 11011 } 11012 11013 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11014 new_ifindex); 11015 11016 /* 11017 * Flush the unicast and multicast chains 11018 */ 11019 dev_uc_flush(dev); 11020 dev_mc_flush(dev); 11021 11022 /* Send a netdev-removed uevent to the old namespace */ 11023 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11024 netdev_adjacent_del_links(dev); 11025 11026 /* Move per-net netdevice notifiers that are following the netdevice */ 11027 move_netdevice_notifiers_dev_net(dev, net); 11028 11029 /* Actually switch the network namespace */ 11030 dev_net_set(dev, net); 11031 dev->ifindex = new_ifindex; 11032 11033 /* Send a netdev-add uevent to the new namespace */ 11034 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11035 netdev_adjacent_add_links(dev); 11036 11037 /* Fixup kobjects */ 11038 err = device_rename(&dev->dev, dev->name); 11039 WARN_ON(err); 11040 11041 /* Adapt owner in case owning user namespace of target network 11042 * namespace is different from the original one. 11043 */ 11044 err = netdev_change_owner(dev, net_old, net); 11045 WARN_ON(err); 11046 11047 /* Add the device back in the hashes */ 11048 list_netdevice(dev); 11049 11050 /* Notify protocols, that a new device appeared. */ 11051 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11052 11053 /* 11054 * Prevent userspace races by waiting until the network 11055 * device is fully setup before sending notifications. 11056 */ 11057 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL); 11058 11059 synchronize_net(); 11060 err = 0; 11061 out: 11062 return err; 11063 } 11064 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 11065 11066 static int dev_cpu_dead(unsigned int oldcpu) 11067 { 11068 struct sk_buff **list_skb; 11069 struct sk_buff *skb; 11070 unsigned int cpu; 11071 struct softnet_data *sd, *oldsd, *remsd = NULL; 11072 11073 local_irq_disable(); 11074 cpu = smp_processor_id(); 11075 sd = &per_cpu(softnet_data, cpu); 11076 oldsd = &per_cpu(softnet_data, oldcpu); 11077 11078 /* Find end of our completion_queue. */ 11079 list_skb = &sd->completion_queue; 11080 while (*list_skb) 11081 list_skb = &(*list_skb)->next; 11082 /* Append completion queue from offline CPU. */ 11083 *list_skb = oldsd->completion_queue; 11084 oldsd->completion_queue = NULL; 11085 11086 /* Append output queue from offline CPU. 
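	 * (the output queue is the list of qdiscs that were scheduled for
	 * transmission via __netif_schedule(); splicing it onto this CPU lets
	 * the NET_TX softirq raised below service them here.)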
*/ 11087 if (oldsd->output_queue) { 11088 *sd->output_queue_tailp = oldsd->output_queue; 11089 sd->output_queue_tailp = oldsd->output_queue_tailp; 11090 oldsd->output_queue = NULL; 11091 oldsd->output_queue_tailp = &oldsd->output_queue; 11092 } 11093 /* Append NAPI poll list from offline CPU, with one exception : 11094 * process_backlog() must be called by cpu owning percpu backlog. 11095 * We properly handle process_queue & input_pkt_queue later. 11096 */ 11097 while (!list_empty(&oldsd->poll_list)) { 11098 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11099 struct napi_struct, 11100 poll_list); 11101 11102 list_del_init(&napi->poll_list); 11103 if (napi->poll == process_backlog) 11104 napi->state = 0; 11105 else 11106 ____napi_schedule(sd, napi); 11107 } 11108 11109 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11110 local_irq_enable(); 11111 11112 #ifdef CONFIG_RPS 11113 remsd = oldsd->rps_ipi_list; 11114 oldsd->rps_ipi_list = NULL; 11115 #endif 11116 /* send out pending IPI's on offline CPU */ 11117 net_rps_send_ipi(remsd); 11118 11119 /* Process offline CPU's input_pkt_queue */ 11120 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11121 netif_rx(skb); 11122 input_queue_head_incr(oldsd); 11123 } 11124 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11125 netif_rx(skb); 11126 input_queue_head_incr(oldsd); 11127 } 11128 11129 return 0; 11130 } 11131 11132 /** 11133 * netdev_increment_features - increment feature set by one 11134 * @all: current feature set 11135 * @one: new feature set 11136 * @mask: mask feature set 11137 * 11138 * Computes a new feature set after adding a device with feature set 11139 * @one to the master device with current feature set @all. Will not 11140 * enable anything that is off in @mask. Returns the new feature set. 11141 */ 11142 netdev_features_t netdev_increment_features(netdev_features_t all, 11143 netdev_features_t one, netdev_features_t mask) 11144 { 11145 if (mask & NETIF_F_HW_CSUM) 11146 mask |= NETIF_F_CSUM_MASK; 11147 mask |= NETIF_F_VLAN_CHALLENGED; 11148 11149 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11150 all &= one | ~NETIF_F_ALL_FOR_ALL; 11151 11152 /* If one device supports hw checksumming, set for all. 
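	 * e.g. a combined NETIF_F_HW_CSUM | NETIF_F_IP_CSUM collapses to
	 * NETIF_F_HW_CSUM alone, since NETIF_F_HW_CSUM is a superset of the
	 * protocol-specific checksum features.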
*/ 11153 if (all & NETIF_F_HW_CSUM) 11154 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11155 11156 return all; 11157 } 11158 EXPORT_SYMBOL(netdev_increment_features); 11159 11160 static struct hlist_head * __net_init netdev_create_hash(void) 11161 { 11162 int i; 11163 struct hlist_head *hash; 11164 11165 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11166 if (hash != NULL) 11167 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11168 INIT_HLIST_HEAD(&hash[i]); 11169 11170 return hash; 11171 } 11172 11173 /* Initialize per network namespace state */ 11174 static int __net_init netdev_init(struct net *net) 11175 { 11176 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11177 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11178 11179 INIT_LIST_HEAD(&net->dev_base_head); 11180 11181 net->dev_name_head = netdev_create_hash(); 11182 if (net->dev_name_head == NULL) 11183 goto err_name; 11184 11185 net->dev_index_head = netdev_create_hash(); 11186 if (net->dev_index_head == NULL) 11187 goto err_idx; 11188 11189 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11190 11191 return 0; 11192 11193 err_idx: 11194 kfree(net->dev_name_head); 11195 err_name: 11196 return -ENOMEM; 11197 } 11198 11199 /** 11200 * netdev_drivername - network driver for the device 11201 * @dev: network device 11202 * 11203 * Determine network driver for device. 11204 */ 11205 const char *netdev_drivername(const struct net_device *dev) 11206 { 11207 const struct device_driver *driver; 11208 const struct device *parent; 11209 const char *empty = ""; 11210 11211 parent = dev->dev.parent; 11212 if (!parent) 11213 return empty; 11214 11215 driver = parent->driver; 11216 if (driver && driver->name) 11217 return driver->name; 11218 return empty; 11219 } 11220 11221 static void __netdev_printk(const char *level, const struct net_device *dev, 11222 struct va_format *vaf) 11223 { 11224 if (dev && dev->dev.parent) { 11225 dev_printk_emit(level[1] - '0', 11226 dev->dev.parent, 11227 "%s %s %s%s: %pV", 11228 dev_driver_string(dev->dev.parent), 11229 dev_name(dev->dev.parent), 11230 netdev_name(dev), netdev_reg_state(dev), 11231 vaf); 11232 } else if (dev) { 11233 printk("%s%s%s: %pV", 11234 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11235 } else { 11236 printk("%s(NULL net_device): %pV", level, vaf); 11237 } 11238 } 11239 11240 void netdev_printk(const char *level, const struct net_device *dev, 11241 const char *format, ...) 11242 { 11243 struct va_format vaf; 11244 va_list args; 11245 11246 va_start(args, format); 11247 11248 vaf.fmt = format; 11249 vaf.va = &args; 11250 11251 __netdev_printk(level, dev, &vaf); 11252 11253 va_end(args); 11254 } 11255 EXPORT_SYMBOL(netdev_printk); 11256 11257 #define define_netdev_printk_level(func, level) \ 11258 void func(const struct net_device *dev, const char *fmt, ...) 
\ 11259 { \ 11260 struct va_format vaf; \ 11261 va_list args; \ 11262 \ 11263 va_start(args, fmt); \ 11264 \ 11265 vaf.fmt = fmt; \ 11266 vaf.va = &args; \ 11267 \ 11268 __netdev_printk(level, dev, &vaf); \ 11269 \ 11270 va_end(args); \ 11271 } \ 11272 EXPORT_SYMBOL(func); 11273 11274 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11275 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11276 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11277 define_netdev_printk_level(netdev_err, KERN_ERR); 11278 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11279 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11280 define_netdev_printk_level(netdev_info, KERN_INFO); 11281 11282 static void __net_exit netdev_exit(struct net *net) 11283 { 11284 kfree(net->dev_name_head); 11285 kfree(net->dev_index_head); 11286 if (net != &init_net) 11287 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 11288 } 11289 11290 static struct pernet_operations __net_initdata netdev_net_ops = { 11291 .init = netdev_init, 11292 .exit = netdev_exit, 11293 }; 11294 11295 static void __net_exit default_device_exit_net(struct net *net) 11296 { 11297 struct net_device *dev, *aux; 11298 /* 11299 * Push all migratable network devices back to the 11300 * initial network namespace 11301 */ 11302 ASSERT_RTNL(); 11303 for_each_netdev_safe(net, dev, aux) { 11304 int err; 11305 char fb_name[IFNAMSIZ]; 11306 11307 /* Ignore unmoveable devices (e.g. loopback) */ 11308 if (dev->features & NETIF_F_NETNS_LOCAL) 11309 continue; 11310 11311 /* Leave virtual devices for the generic cleanup */ 11312 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 11313 continue; 11314 11315 /* Push remaining network devices to init_net */ 11316 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11317 if (netdev_name_in_use(&init_net, fb_name)) 11318 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11319 err = dev_change_net_namespace(dev, &init_net, fb_name); 11320 if (err) { 11321 pr_emerg("%s: failed to move %s to init_net: %d\n", 11322 __func__, dev->name, err); 11323 BUG(); 11324 } 11325 } 11326 } 11327 11328 static void __net_exit default_device_exit_batch(struct list_head *net_list) 11329 { 11330 /* At exit, all network devices must be removed from a network 11331 * namespace. Do this in the reverse order of registration. 11332 * Do this across as many network namespaces as possible to 11333 * improve batching efficiency. 11334 */ 11335 struct net_device *dev; 11336 struct net *net; 11337 LIST_HEAD(dev_kill_list); 11338 11339 rtnl_lock(); 11340 list_for_each_entry(net, net_list, exit_list) { 11341 default_device_exit_net(net); 11342 cond_resched(); 11343 } 11344 11345 list_for_each_entry(net, net_list, exit_list) { 11346 for_each_netdev_reverse(net, dev) { 11347 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 11348 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 11349 else 11350 unregister_netdevice_queue(dev, &dev_kill_list); 11351 } 11352 } 11353 unregister_netdevice_many(&dev_kill_list); 11354 rtnl_unlock(); 11355 } 11356 11357 static struct pernet_operations __net_initdata default_device_ops = { 11358 .exit_batch = default_device_exit_batch, 11359 }; 11360 11361 /* 11362 * Initialize the DEV module. At boot time this walks the device list and 11363 * unhooks any devices that fail to initialise (normally hardware not 11364 * present) and leaves us with a valid list of present and active devices.
11365 * 11366 */ 11367 11368 /* 11369 * This is called single threaded during boot, so no need 11370 * to take the rtnl semaphore. 11371 */ 11372 static int __init net_dev_init(void) 11373 { 11374 int i, rc = -ENOMEM; 11375 11376 BUG_ON(!dev_boot_phase); 11377 11378 if (dev_proc_init()) 11379 goto out; 11380 11381 if (netdev_kobject_init()) 11382 goto out; 11383 11384 INIT_LIST_HEAD(&ptype_all); 11385 for (i = 0; i < PTYPE_HASH_SIZE; i++) 11386 INIT_LIST_HEAD(&ptype_base[i]); 11387 11388 if (register_pernet_subsys(&netdev_net_ops)) 11389 goto out; 11390 11391 /* 11392 * Initialise the packet receive queues. 11393 */ 11394 11395 for_each_possible_cpu(i) { 11396 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 11397 struct softnet_data *sd = &per_cpu(softnet_data, i); 11398 11399 INIT_WORK(flush, flush_backlog); 11400 11401 skb_queue_head_init(&sd->input_pkt_queue); 11402 skb_queue_head_init(&sd->process_queue); 11403 #ifdef CONFIG_XFRM_OFFLOAD 11404 skb_queue_head_init(&sd->xfrm_backlog); 11405 #endif 11406 INIT_LIST_HEAD(&sd->poll_list); 11407 sd->output_queue_tailp = &sd->output_queue; 11408 #ifdef CONFIG_RPS 11409 INIT_CSD(&sd->csd, rps_trigger_softirq, sd); 11410 sd->cpu = i; 11411 #endif 11412 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); 11413 spin_lock_init(&sd->defer_lock); 11414 11415 init_gro_hash(&sd->backlog); 11416 sd->backlog.poll = process_backlog; 11417 sd->backlog.weight = weight_p; 11418 } 11419 11420 dev_boot_phase = 0; 11421 11422 /* The loopback device is special: if any other network device 11423 * is present in a network namespace, the loopback device must 11424 * be present. Since we now dynamically allocate and free the 11425 * loopback device, ensure this invariant is maintained by 11426 * keeping the loopback device as the first device on the 11427 * list of network devices. Ensure the loopback device 11428 * is the first device that appears and the last network device 11429 * that disappears. 11430 */ 11431 if (register_pernet_device(&loopback_net_ops)) 11432 goto out; 11433 11434 if (register_pernet_device(&default_device_ops)) 11435 goto out; 11436 11437 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 11438 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 11439 11440 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 11441 NULL, dev_cpu_dead); 11442 WARN_ON(rc < 0); 11443 rc = 0; 11444 out: 11445 return rc; 11446 } 11447 11448 subsys_initcall(net_dev_init); 11449