// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "net-sysfs.h"


static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
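/*
 * A minimal sketch of the locking rules above (illustrative only, not used
 * in this file; do_something() is a placeholder).  A pure reader can rely
 * on RCU instead of taking dev_base_lock:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		do_something(dev);
 *	rcu_read_unlock();
 *
 * while a writer holds the rtnl semaphore for the whole update and takes
 * dev_base_lock only around the list manipulation itself, as
 * list_netdevice() and unlist_netdevice() below do.
 */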

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock_irqsave(struct softnet_data *sd,
				    unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
					  unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 463 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 464 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 465 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 466 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 467 468 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 469 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 470 471 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 472 { 473 int i; 474 475 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 476 if (netdev_lock_type[i] == dev_type) 477 return i; 478 /* the last key is used by default */ 479 return ARRAY_SIZE(netdev_lock_type) - 1; 480 } 481 482 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 483 unsigned short dev_type) 484 { 485 int i; 486 487 i = netdev_lock_pos(dev_type); 488 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 489 netdev_lock_name[i]); 490 } 491 492 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 493 { 494 int i; 495 496 i = netdev_lock_pos(dev->type); 497 lockdep_set_class_and_name(&dev->addr_list_lock, 498 &netdev_addr_lock_key[i], 499 netdev_lock_name[i]); 500 } 501 #else 502 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 503 unsigned short dev_type) 504 { 505 } 506 507 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 508 { 509 } 510 #endif 511 512 /******************************************************************************* 513 * 514 * Protocol management and registration routines 515 * 516 *******************************************************************************/ 517 518 519 /* 520 * Add a protocol ID to the list. Now that the input handler is 521 * smarter we can dispense with all the messy stuff that used to be 522 * here. 523 * 524 * BEWARE!!! Protocol handlers, mangling input packets, 525 * MUST BE last in hash buckets and checking protocol handlers 526 * MUST start from promiscuous ptype_all chain in net_bh. 527 * It is true now, do not change it. 528 * Explanation follows: if protocol handler, mangling packet, will 529 * be the first on list, it is not able to sense, that packet 530 * is cloned and should be copied-on-write, so that it will 531 * change it and subsequent readers will get broken packet. 532 * --ANK (980803) 533 */ 534 535 static inline struct list_head *ptype_head(const struct packet_type *pt) 536 { 537 if (pt->type == htons(ETH_P_ALL)) 538 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 539 else 540 return pt->dev ? &pt->dev->ptype_specific : 541 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 542 } 543 544 /** 545 * dev_add_pack - add packet handler 546 * @pt: packet type declaration 547 * 548 * Add a protocol handler to the networking stack. The passed &packet_type 549 * is linked into kernel lists and may not be freed until it has been 550 * removed from the kernel lists. 551 * 552 * This call does not sleep therefore it can not 553 * guarantee all CPU's that are in middle of receiving packets 554 * will see the new packet type (until the next received packet). 
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to retrieve this information.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
		.daddr	= daddr,
	};
	struct net_device_path *path;
	int ret = 0;

	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}
	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
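 *
 *	A minimal usage sketch ("eth0" is only an example name and the
 *	surrounding error handling is elided):
 *
 *		struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *		if (dev) {
 *			... use dev ...
 *			dev_put(dev);
 *		}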
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot.
 *	The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;
			list_for_each_entry(name_node, &d->name_node->list, list) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/* avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					__set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!netdev_name_in_use(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot.  The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
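 *
 *	A minimal usage sketch (the "lan%d" pattern is only an example):
 *
 *		int unit = dev_alloc_name(dev, "lan%d");
 *
 *		if (unit < 0)
 *			return unit;
 *
 *	On success @dev->name now holds e.g. "lan0" and @unit is the number
 *	that was assigned.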
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (netdev_name_in_use(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d" may be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock(&dev_base_lock);

	synchronize_rcu();

	write_lock(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	Get the ifalias for a device. The caller must make sure dev cannot go
 *	away, e.g. by holding the RCU read lock or owning a reference to the
 *	device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP.
 * This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
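 *
 *	A minimal usage sketch (must run under the RTNL lock, which
 *	__dev_open() asserts; a NULL extack is acceptable):
 *
 *		rtnl_lock();
 *		err = dev_open(dev, NULL);
 *		rtnl_unlock();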
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
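 *
 *	A minimal usage sketch (RTNL must already be held by the caller,
 *	for example while forwarding is being enabled on @dev):
 *
 *		ASSERT_RTNL();
 *		dev_disable_lro(dev);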
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}

static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}

static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}

static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it a race-free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
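 *
 * A minimal register/unregister sketch (the callback and notifier_block
 * names are illustrative only):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "came up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);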
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}

/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it a race-free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);

/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);

int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);

static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list) {
		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
		__register_netdevice_notifier_net(net, nn->nb, true);
	}
}

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
	if (ret & NOTIFY_STOP_MASK)
		return ret;
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
1961 */ 1962 1963 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1964 { 1965 return call_netdevice_notifiers_extack(val, dev, NULL); 1966 } 1967 EXPORT_SYMBOL(call_netdevice_notifiers); 1968 1969 /** 1970 * call_netdevice_notifiers_mtu - call all network notifier blocks 1971 * @val: value passed unmodified to notifier function 1972 * @dev: net_device pointer passed unmodified to notifier function 1973 * @arg: additional u32 argument passed to the notifier function 1974 * 1975 * Call all network notifier blocks. Parameters and return value 1976 * are as for raw_notifier_call_chain(). 1977 */ 1978 static int call_netdevice_notifiers_mtu(unsigned long val, 1979 struct net_device *dev, u32 arg) 1980 { 1981 struct netdev_notifier_info_ext info = { 1982 .info.dev = dev, 1983 .ext.mtu = arg, 1984 }; 1985 1986 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 1987 1988 return call_netdevice_notifiers_info(val, &info.info); 1989 } 1990 1991 #ifdef CONFIG_NET_INGRESS 1992 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 1993 1994 void net_inc_ingress_queue(void) 1995 { 1996 static_branch_inc(&ingress_needed_key); 1997 } 1998 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 1999 2000 void net_dec_ingress_queue(void) 2001 { 2002 static_branch_dec(&ingress_needed_key); 2003 } 2004 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2005 #endif 2006 2007 #ifdef CONFIG_NET_EGRESS 2008 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2009 2010 void net_inc_egress_queue(void) 2011 { 2012 static_branch_inc(&egress_needed_key); 2013 } 2014 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2015 2016 void net_dec_egress_queue(void) 2017 { 2018 static_branch_dec(&egress_needed_key); 2019 } 2020 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2021 #endif 2022 2023 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2024 #ifdef CONFIG_JUMP_LABEL 2025 static atomic_t netstamp_needed_deferred; 2026 static atomic_t netstamp_wanted; 2027 static void netstamp_clear(struct work_struct *work) 2028 { 2029 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2030 int wanted; 2031 2032 wanted = atomic_add_return(deferred, &netstamp_wanted); 2033 if (wanted > 0) 2034 static_branch_enable(&netstamp_needed_key); 2035 else 2036 static_branch_disable(&netstamp_needed_key); 2037 } 2038 static DECLARE_WORK(netstamp_work, netstamp_clear); 2039 #endif 2040 2041 void net_enable_timestamp(void) 2042 { 2043 #ifdef CONFIG_JUMP_LABEL 2044 int wanted; 2045 2046 while (1) { 2047 wanted = atomic_read(&netstamp_wanted); 2048 if (wanted <= 0) 2049 break; 2050 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 2051 return; 2052 } 2053 atomic_inc(&netstamp_needed_deferred); 2054 schedule_work(&netstamp_work); 2055 #else 2056 static_branch_inc(&netstamp_needed_key); 2057 #endif 2058 } 2059 EXPORT_SYMBOL(net_enable_timestamp); 2060 2061 void net_disable_timestamp(void) 2062 { 2063 #ifdef CONFIG_JUMP_LABEL 2064 int wanted; 2065 2066 while (1) { 2067 wanted = atomic_read(&netstamp_wanted); 2068 if (wanted <= 1) 2069 break; 2070 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 2071 return; 2072 } 2073 atomic_dec(&netstamp_needed_deferred); 2074 schedule_work(&netstamp_work); 2075 #else 2076 static_branch_dec(&netstamp_needed_key); 2077 #endif 2078 } 2079 EXPORT_SYMBOL(net_disable_timestamp); 2080 2081 static inline void net_timestamp_set(struct sk_buff *skb) 2082 { 2083 skb->tstamp = 0; 2084 if (static_branch_unlikely(&netstamp_needed_key)) 2085 __net_timestamp(skb); 2086 } 2087 2088 #define 
net_timestamp_check(COND, SKB) \ 2089 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2090 if ((COND) && !(SKB)->tstamp) \ 2091 __net_timestamp(SKB); \ 2092 } \ 2093 2094 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2095 { 2096 return __is_skb_forwardable(dev, skb, true); 2097 } 2098 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2099 2100 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2101 bool check_mtu) 2102 { 2103 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2104 2105 if (likely(!ret)) { 2106 skb->protocol = eth_type_trans(skb, dev); 2107 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2108 } 2109 2110 return ret; 2111 } 2112 2113 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2114 { 2115 return __dev_forward_skb2(dev, skb, true); 2116 } 2117 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2118 2119 /** 2120 * dev_forward_skb - loopback an skb to another netif 2121 * 2122 * @dev: destination network device 2123 * @skb: buffer to forward 2124 * 2125 * return values: 2126 * NET_RX_SUCCESS (no congestion) 2127 * NET_RX_DROP (packet was dropped, but freed) 2128 * 2129 * dev_forward_skb can be used for injecting an skb from the 2130 * start_xmit function of one device into the receive queue 2131 * of another device. 2132 * 2133 * The receiving device may be in another namespace, so 2134 * we have to clear all information in the skb that could 2135 * impact namespace isolation. 2136 */ 2137 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2138 { 2139 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2140 } 2141 EXPORT_SYMBOL_GPL(dev_forward_skb); 2142 2143 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2144 { 2145 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2146 } 2147 2148 static inline int deliver_skb(struct sk_buff *skb, 2149 struct packet_type *pt_prev, 2150 struct net_device *orig_dev) 2151 { 2152 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2153 return -ENOMEM; 2154 refcount_inc(&skb->users); 2155 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2156 } 2157 2158 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2159 struct packet_type **pt, 2160 struct net_device *orig_dev, 2161 __be16 type, 2162 struct list_head *ptype_list) 2163 { 2164 struct packet_type *ptype, *pt_prev = *pt; 2165 2166 list_for_each_entry_rcu(ptype, ptype_list, list) { 2167 if (ptype->type != type) 2168 continue; 2169 if (pt_prev) 2170 deliver_skb(skb, pt_prev, orig_dev); 2171 pt_prev = ptype; 2172 } 2173 *pt = pt_prev; 2174 } 2175 2176 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2177 { 2178 if (!ptype->af_packet_priv || !skb->sk) 2179 return false; 2180 2181 if (ptype->id_match) 2182 return ptype->id_match(ptype, skb->sk); 2183 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2184 return true; 2185 2186 return false; 2187 } 2188 2189 /** 2190 * dev_nit_active - return true if any network interface taps are in use 2191 * 2192 * @dev: network device to check for the presence of taps 2193 */ 2194 bool dev_nit_active(struct net_device *dev) 2195 { 2196 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2197 } 2198 EXPORT_SYMBOL_GPL(dev_nit_active); 2199 2200 /* 2201 * Support routine. Sends outgoing frames to any network 2202 * taps currently in use. 
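 *
 * A "tap" here is simply a packet_type registered for all protocols
 * (ETH_P_ALL), as AF_PACKET sockets do. A hypothetical module could
 * hook frames with something like (sketch only):
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *	dev_add_pack(&my_tap);
 *
 * where my_tap_rcv() is the module's own receive handler.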
2203 */ 2204 2205 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2206 { 2207 struct packet_type *ptype; 2208 struct sk_buff *skb2 = NULL; 2209 struct packet_type *pt_prev = NULL; 2210 struct list_head *ptype_list = &ptype_all; 2211 2212 rcu_read_lock(); 2213 again: 2214 list_for_each_entry_rcu(ptype, ptype_list, list) { 2215 if (ptype->ignore_outgoing) 2216 continue; 2217 2218 /* Never send packets back to the socket 2219 * they originated from - MvS (miquels@drinkel.ow.org) 2220 */ 2221 if (skb_loop_sk(ptype, skb)) 2222 continue; 2223 2224 if (pt_prev) { 2225 deliver_skb(skb2, pt_prev, skb->dev); 2226 pt_prev = ptype; 2227 continue; 2228 } 2229 2230 /* need to clone skb, done only once */ 2231 skb2 = skb_clone(skb, GFP_ATOMIC); 2232 if (!skb2) 2233 goto out_unlock; 2234 2235 net_timestamp_set(skb2); 2236 2237 /* skb->nh should be correctly 2238 * set by sender, so that the second statement is 2239 * just protection against buggy protocols. 2240 */ 2241 skb_reset_mac_header(skb2); 2242 2243 if (skb_network_header(skb2) < skb2->data || 2244 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2245 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2246 ntohs(skb2->protocol), 2247 dev->name); 2248 skb_reset_network_header(skb2); 2249 } 2250 2251 skb2->transport_header = skb2->network_header; 2252 skb2->pkt_type = PACKET_OUTGOING; 2253 pt_prev = ptype; 2254 } 2255 2256 if (ptype_list == &ptype_all) { 2257 ptype_list = &dev->ptype_all; 2258 goto again; 2259 } 2260 out_unlock: 2261 if (pt_prev) { 2262 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2263 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2264 else 2265 kfree_skb(skb2); 2266 } 2267 rcu_read_unlock(); 2268 } 2269 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2270 2271 /** 2272 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2273 * @dev: Network device 2274 * @txq: number of queues available 2275 * 2276 * If real_num_tx_queues is changed the tc mappings may no longer be 2277 * valid. To resolve this verify the tc mapping remains valid and if 2278 * not NULL the mapping. With no priorities mapping to this 2279 * offset/count pair it will no longer be used. In the worst case TC0 2280 * is invalid nothing can be done so disable priority mappings. If is 2281 * expected that drivers will fix this mapping if they can before 2282 * calling netif_set_real_num_tx_queues. 2283 */ 2284 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2285 { 2286 int i; 2287 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2288 2289 /* If TC0 is invalidated disable TC mapping */ 2290 if (tc->offset + tc->count > txq) { 2291 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2292 dev->num_tc = 0; 2293 return; 2294 } 2295 2296 /* Invalidated prio to tc mappings set to TC0 */ 2297 for (i = 1; i < TC_BITMASK + 1; i++) { 2298 int q = netdev_get_prio_tc_map(dev, i); 2299 2300 tc = &dev->tc_to_txq[q]; 2301 if (tc->offset + tc->count > txq) { 2302 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2303 i, q); 2304 netdev_set_prio_tc_map(dev, i, 0); 2305 } 2306 } 2307 } 2308 2309 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2310 { 2311 if (dev->num_tc) { 2312 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2313 int i; 2314 2315 /* walk through the TCs and see if it falls into any of them */ 2316 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2317 if ((txq - tc->offset) < tc->count) 2318 return i; 2319 } 2320 2321 /* didn't find it, just return -1 to indicate no match */ 2322 return -1; 2323 } 2324 2325 return 0; 2326 } 2327 EXPORT_SYMBOL(netdev_txq_to_tc); 2328 2329 #ifdef CONFIG_XPS 2330 static struct static_key xps_needed __read_mostly; 2331 static struct static_key xps_rxqs_needed __read_mostly; 2332 static DEFINE_MUTEX(xps_map_mutex); 2333 #define xmap_dereference(P) \ 2334 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2335 2336 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2337 struct xps_dev_maps *old_maps, int tci, u16 index) 2338 { 2339 struct xps_map *map = NULL; 2340 int pos; 2341 2342 if (dev_maps) 2343 map = xmap_dereference(dev_maps->attr_map[tci]); 2344 if (!map) 2345 return false; 2346 2347 for (pos = map->len; pos--;) { 2348 if (map->queues[pos] != index) 2349 continue; 2350 2351 if (map->len > 1) { 2352 map->queues[pos] = map->queues[--map->len]; 2353 break; 2354 } 2355 2356 if (old_maps) 2357 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2358 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2359 kfree_rcu(map, rcu); 2360 return false; 2361 } 2362 2363 return true; 2364 } 2365 2366 static bool remove_xps_queue_cpu(struct net_device *dev, 2367 struct xps_dev_maps *dev_maps, 2368 int cpu, u16 offset, u16 count) 2369 { 2370 int num_tc = dev_maps->num_tc; 2371 bool active = false; 2372 int tci; 2373 2374 for (tci = cpu * num_tc; num_tc--; tci++) { 2375 int i, j; 2376 2377 for (i = count, j = offset; i--; j++) { 2378 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2379 break; 2380 } 2381 2382 active |= i < 0; 2383 } 2384 2385 return active; 2386 } 2387 2388 static void reset_xps_maps(struct net_device *dev, 2389 struct xps_dev_maps *dev_maps, 2390 enum xps_map_type type) 2391 { 2392 static_key_slow_dec_cpuslocked(&xps_needed); 2393 if (type == XPS_RXQS) 2394 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2395 2396 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2397 2398 kfree_rcu(dev_maps, rcu); 2399 } 2400 2401 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2402 u16 offset, u16 count) 2403 { 2404 struct xps_dev_maps *dev_maps; 2405 bool active = false; 2406 int i, j; 2407 2408 dev_maps = xmap_dereference(dev->xps_maps[type]); 2409 if (!dev_maps) 2410 return; 2411 2412 for (j = 0; j < dev_maps->nr_ids; j++) 2413 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2414 if (!active) 2415 reset_xps_maps(dev, dev_maps, type); 2416 2417 if (type == XPS_CPUS) { 2418 for (i = offset + (count - 1); count--; i--) 2419 netdev_queue_numa_node_write( 2420 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2421 } 2422 } 2423 2424 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2425 u16 count) 2426 { 2427 if (!static_key_false(&xps_needed)) 2428 return; 2429 2430 cpus_read_lock(); 2431 mutex_lock(&xps_map_mutex); 2432 2433 if (static_key_false(&xps_rxqs_needed)) 2434 clean_xps_maps(dev, XPS_RXQS, offset, count); 2435 2436 clean_xps_maps(dev, XPS_CPUS, offset, count); 2437 2438 mutex_unlock(&xps_map_mutex); 2439 cpus_read_unlock(); 2440 } 2441 2442 static void 
netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2443 { 2444 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2445 } 2446 2447 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2448 u16 index, bool is_rxqs_map) 2449 { 2450 struct xps_map *new_map; 2451 int alloc_len = XPS_MIN_MAP_ALLOC; 2452 int i, pos; 2453 2454 for (pos = 0; map && pos < map->len; pos++) { 2455 if (map->queues[pos] != index) 2456 continue; 2457 return map; 2458 } 2459 2460 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2461 if (map) { 2462 if (pos < map->alloc_len) 2463 return map; 2464 2465 alloc_len = map->alloc_len * 2; 2466 } 2467 2468 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2469 * map 2470 */ 2471 if (is_rxqs_map) 2472 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2473 else 2474 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2475 cpu_to_node(attr_index)); 2476 if (!new_map) 2477 return NULL; 2478 2479 for (i = 0; i < pos; i++) 2480 new_map->queues[i] = map->queues[i]; 2481 new_map->alloc_len = alloc_len; 2482 new_map->len = pos; 2483 2484 return new_map; 2485 } 2486 2487 /* Copy xps maps at a given index */ 2488 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2489 struct xps_dev_maps *new_dev_maps, int index, 2490 int tc, bool skip_tc) 2491 { 2492 int i, tci = index * dev_maps->num_tc; 2493 struct xps_map *map; 2494 2495 /* copy maps belonging to foreign traffic classes */ 2496 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2497 if (i == tc && skip_tc) 2498 continue; 2499 2500 /* fill in the new device map from the old device map */ 2501 map = xmap_dereference(dev_maps->attr_map[tci]); 2502 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2503 } 2504 } 2505 2506 /* Must be called under cpus_read_lock */ 2507 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2508 u16 index, enum xps_map_type type) 2509 { 2510 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2511 const unsigned long *online_mask = NULL; 2512 bool active = false, copy = false; 2513 int i, j, tci, numa_node_id = -2; 2514 int maps_sz, num_tc = 1, tc = 0; 2515 struct xps_map *map, *new_map; 2516 unsigned int nr_ids; 2517 2518 if (dev->num_tc) { 2519 /* Do not allow XPS on subordinate device directly */ 2520 num_tc = dev->num_tc; 2521 if (num_tc < 0) 2522 return -EINVAL; 2523 2524 /* If queue belongs to subordinate dev use its map */ 2525 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2526 2527 tc = netdev_txq_to_tc(dev, index); 2528 if (tc < 0) 2529 return -EINVAL; 2530 } 2531 2532 mutex_lock(&xps_map_mutex); 2533 2534 dev_maps = xmap_dereference(dev->xps_maps[type]); 2535 if (type == XPS_RXQS) { 2536 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2537 nr_ids = dev->num_rx_queues; 2538 } else { 2539 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2540 if (num_possible_cpus() > 1) 2541 online_mask = cpumask_bits(cpu_online_mask); 2542 nr_ids = nr_cpu_ids; 2543 } 2544 2545 if (maps_sz < L1_CACHE_BYTES) 2546 maps_sz = L1_CACHE_BYTES; 2547 2548 /* The old dev_maps could be larger or smaller than the one we're 2549 * setting up now, as dev->num_tc or nr_ids could have been updated in 2550 * between. We could try to be smart, but let's be safe instead and only 2551 * copy foreign traffic classes if the two map sizes match. 
2552 */ 2553 if (dev_maps && 2554 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2555 copy = true; 2556 2557 /* allocate memory for queue storage */ 2558 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2559 j < nr_ids;) { 2560 if (!new_dev_maps) { 2561 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2562 if (!new_dev_maps) { 2563 mutex_unlock(&xps_map_mutex); 2564 return -ENOMEM; 2565 } 2566 2567 new_dev_maps->nr_ids = nr_ids; 2568 new_dev_maps->num_tc = num_tc; 2569 } 2570 2571 tci = j * num_tc + tc; 2572 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2573 2574 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2575 if (!map) 2576 goto error; 2577 2578 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2579 } 2580 2581 if (!new_dev_maps) 2582 goto out_no_new_maps; 2583 2584 if (!dev_maps) { 2585 /* Increment static keys at most once per type */ 2586 static_key_slow_inc_cpuslocked(&xps_needed); 2587 if (type == XPS_RXQS) 2588 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2589 } 2590 2591 for (j = 0; j < nr_ids; j++) { 2592 bool skip_tc = false; 2593 2594 tci = j * num_tc + tc; 2595 if (netif_attr_test_mask(j, mask, nr_ids) && 2596 netif_attr_test_online(j, online_mask, nr_ids)) { 2597 /* add tx-queue to CPU/rx-queue maps */ 2598 int pos = 0; 2599 2600 skip_tc = true; 2601 2602 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2603 while ((pos < map->len) && (map->queues[pos] != index)) 2604 pos++; 2605 2606 if (pos == map->len) 2607 map->queues[map->len++] = index; 2608 #ifdef CONFIG_NUMA 2609 if (type == XPS_CPUS) { 2610 if (numa_node_id == -2) 2611 numa_node_id = cpu_to_node(j); 2612 else if (numa_node_id != cpu_to_node(j)) 2613 numa_node_id = -1; 2614 } 2615 #endif 2616 } 2617 2618 if (copy) 2619 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2620 skip_tc); 2621 } 2622 2623 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2624 2625 /* Cleanup old maps */ 2626 if (!dev_maps) 2627 goto out_no_old_maps; 2628 2629 for (j = 0; j < dev_maps->nr_ids; j++) { 2630 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2631 map = xmap_dereference(dev_maps->attr_map[tci]); 2632 if (!map) 2633 continue; 2634 2635 if (copy) { 2636 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2637 if (map == new_map) 2638 continue; 2639 } 2640 2641 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2642 kfree_rcu(map, rcu); 2643 } 2644 } 2645 2646 old_dev_maps = dev_maps; 2647 2648 out_no_old_maps: 2649 dev_maps = new_dev_maps; 2650 active = true; 2651 2652 out_no_new_maps: 2653 if (type == XPS_CPUS) 2654 /* update Tx queue numa node */ 2655 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2656 (numa_node_id >= 0) ? 2657 numa_node_id : NUMA_NO_NODE); 2658 2659 if (!dev_maps) 2660 goto out_no_maps; 2661 2662 /* removes tx-queue from unused CPUs/rx-queues */ 2663 for (j = 0; j < dev_maps->nr_ids; j++) { 2664 tci = j * dev_maps->num_tc; 2665 2666 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2667 if (i == tc && 2668 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2669 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2670 continue; 2671 2672 active |= remove_xps_queue(dev_maps, 2673 copy ? 
old_dev_maps : NULL, 2674 tci, index); 2675 } 2676 } 2677 2678 if (old_dev_maps) 2679 kfree_rcu(old_dev_maps, rcu); 2680 2681 /* free map if not active */ 2682 if (!active) 2683 reset_xps_maps(dev, dev_maps, type); 2684 2685 out_no_maps: 2686 mutex_unlock(&xps_map_mutex); 2687 2688 return 0; 2689 error: 2690 /* remove any maps that we added */ 2691 for (j = 0; j < nr_ids; j++) { 2692 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2693 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2694 map = copy ? 2695 xmap_dereference(dev_maps->attr_map[tci]) : 2696 NULL; 2697 if (new_map && new_map != map) 2698 kfree(new_map); 2699 } 2700 } 2701 2702 mutex_unlock(&xps_map_mutex); 2703 2704 kfree(new_dev_maps); 2705 return -ENOMEM; 2706 } 2707 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2708 2709 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2710 u16 index) 2711 { 2712 int ret; 2713 2714 cpus_read_lock(); 2715 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2716 cpus_read_unlock(); 2717 2718 return ret; 2719 } 2720 EXPORT_SYMBOL(netif_set_xps_queue); 2721 2722 #endif 2723 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2724 { 2725 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2726 2727 /* Unbind any subordinate channels */ 2728 while (txq-- != &dev->_tx[0]) { 2729 if (txq->sb_dev) 2730 netdev_unbind_sb_channel(dev, txq->sb_dev); 2731 } 2732 } 2733 2734 void netdev_reset_tc(struct net_device *dev) 2735 { 2736 #ifdef CONFIG_XPS 2737 netif_reset_xps_queues_gt(dev, 0); 2738 #endif 2739 netdev_unbind_all_sb_channels(dev); 2740 2741 /* Reset TC configuration of device */ 2742 dev->num_tc = 0; 2743 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2744 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2745 } 2746 EXPORT_SYMBOL(netdev_reset_tc); 2747 2748 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2749 { 2750 if (tc >= dev->num_tc) 2751 return -EINVAL; 2752 2753 #ifdef CONFIG_XPS 2754 netif_reset_xps_queues(dev, offset, count); 2755 #endif 2756 dev->tc_to_txq[tc].count = count; 2757 dev->tc_to_txq[tc].offset = offset; 2758 return 0; 2759 } 2760 EXPORT_SYMBOL(netdev_set_tc_queue); 2761 2762 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2763 { 2764 if (num_tc > TC_MAX_QUEUE) 2765 return -EINVAL; 2766 2767 #ifdef CONFIG_XPS 2768 netif_reset_xps_queues_gt(dev, 0); 2769 #endif 2770 netdev_unbind_all_sb_channels(dev); 2771 2772 dev->num_tc = num_tc; 2773 return 0; 2774 } 2775 EXPORT_SYMBOL(netdev_set_num_tc); 2776 2777 void netdev_unbind_sb_channel(struct net_device *dev, 2778 struct net_device *sb_dev) 2779 { 2780 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2781 2782 #ifdef CONFIG_XPS 2783 netif_reset_xps_queues_gt(sb_dev, 0); 2784 #endif 2785 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2786 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2787 2788 while (txq-- != &dev->_tx[0]) { 2789 if (txq->sb_dev == sb_dev) 2790 txq->sb_dev = NULL; 2791 } 2792 } 2793 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2794 2795 int netdev_bind_sb_channel_queue(struct net_device *dev, 2796 struct net_device *sb_dev, 2797 u8 tc, u16 count, u16 offset) 2798 { 2799 /* Make certain the sb_dev and dev are already configured */ 2800 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2801 return -EINVAL; 2802 2803 /* We cannot hand out queues we don't have */ 2804 if ((offset + count) > dev->real_num_tx_queues) 2805 return -EINVAL; 2806 2807 /* Record the mapping */ 2808 
sb_dev->tc_to_txq[tc].count = count; 2809 sb_dev->tc_to_txq[tc].offset = offset; 2810 2811 /* Provide a way for Tx queue to find the tc_to_txq map or 2812 * XPS map for itself. 2813 */ 2814 while (count--) 2815 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2816 2817 return 0; 2818 } 2819 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2820 2821 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2822 { 2823 /* Do not use a multiqueue device to represent a subordinate channel */ 2824 if (netif_is_multiqueue(dev)) 2825 return -ENODEV; 2826 2827 /* We allow channels 1 - 32767 to be used for subordinate channels. 2828 * Channel 0 is meant to be "native" mode and used only to represent 2829 * the main root device. We allow writing 0 to reset the device back 2830 * to normal mode after being used as a subordinate channel. 2831 */ 2832 if (channel > S16_MAX) 2833 return -EINVAL; 2834 2835 dev->num_tc = -channel; 2836 2837 return 0; 2838 } 2839 EXPORT_SYMBOL(netdev_set_sb_channel); 2840 2841 /* 2842 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2843 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2844 */ 2845 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2846 { 2847 bool disabling; 2848 int rc; 2849 2850 disabling = txq < dev->real_num_tx_queues; 2851 2852 if (txq < 1 || txq > dev->num_tx_queues) 2853 return -EINVAL; 2854 2855 if (dev->reg_state == NETREG_REGISTERED || 2856 dev->reg_state == NETREG_UNREGISTERING) { 2857 ASSERT_RTNL(); 2858 2859 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2860 txq); 2861 if (rc) 2862 return rc; 2863 2864 if (dev->num_tc) 2865 netif_setup_tc(dev, txq); 2866 2867 dev_qdisc_change_real_num_tx(dev, txq); 2868 2869 dev->real_num_tx_queues = txq; 2870 2871 if (disabling) { 2872 synchronize_net(); 2873 qdisc_reset_all_tx_gt(dev, txq); 2874 #ifdef CONFIG_XPS 2875 netif_reset_xps_queues_gt(dev, txq); 2876 #endif 2877 } 2878 } else { 2879 dev->real_num_tx_queues = txq; 2880 } 2881 2882 return 0; 2883 } 2884 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2885 2886 #ifdef CONFIG_SYSFS 2887 /** 2888 * netif_set_real_num_rx_queues - set actual number of RX queues used 2889 * @dev: Network device 2890 * @rxq: Actual number of RX queues 2891 * 2892 * This must be called either with the rtnl_lock held or before 2893 * registration of the net device. Returns 0 on success, or a 2894 * negative error code. If called before registration, it always 2895 * succeeds. 2896 */ 2897 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2898 { 2899 int rc; 2900 2901 if (rxq < 1 || rxq > dev->num_rx_queues) 2902 return -EINVAL; 2903 2904 if (dev->reg_state == NETREG_REGISTERED) { 2905 ASSERT_RTNL(); 2906 2907 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2908 rxq); 2909 if (rc) 2910 return rc; 2911 } 2912 2913 dev->real_num_rx_queues = rxq; 2914 return 0; 2915 } 2916 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2917 #endif 2918 2919 /** 2920 * netif_set_real_num_queues - set actual number of RX and TX queues used 2921 * @dev: Network device 2922 * @txq: Actual number of TX queues 2923 * @rxq: Actual number of RX queues 2924 * 2925 * Set the real number of both TX and RX queues. 2926 * Does nothing if the number of queues is already correct. 
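 *
 * A sketch of how a driver might use this when reconfiguring its
 * channels (hypothetical values; once the device is registered the
 * call must be made under RTNL):
 *
 *	err = netif_set_real_num_queues(dev, new_txq, new_rxq);
 *	if (err)
 *		return err;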
2927 */ 2928 int netif_set_real_num_queues(struct net_device *dev, 2929 unsigned int txq, unsigned int rxq) 2930 { 2931 unsigned int old_rxq = dev->real_num_rx_queues; 2932 int err; 2933 2934 if (txq < 1 || txq > dev->num_tx_queues || 2935 rxq < 1 || rxq > dev->num_rx_queues) 2936 return -EINVAL; 2937 2938 /* Start from increases, so the error path only does decreases - 2939 * decreases can't fail. 2940 */ 2941 if (rxq > dev->real_num_rx_queues) { 2942 err = netif_set_real_num_rx_queues(dev, rxq); 2943 if (err) 2944 return err; 2945 } 2946 if (txq > dev->real_num_tx_queues) { 2947 err = netif_set_real_num_tx_queues(dev, txq); 2948 if (err) 2949 goto undo_rx; 2950 } 2951 if (rxq < dev->real_num_rx_queues) 2952 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 2953 if (txq < dev->real_num_tx_queues) 2954 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 2955 2956 return 0; 2957 undo_rx: 2958 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 2959 return err; 2960 } 2961 EXPORT_SYMBOL(netif_set_real_num_queues); 2962 2963 /** 2964 * netif_get_num_default_rss_queues - default number of RSS queues 2965 * 2966 * This routine should set an upper limit on the number of RSS queues 2967 * used by default by multiqueue devices. 2968 */ 2969 int netif_get_num_default_rss_queues(void) 2970 { 2971 return is_kdump_kernel() ? 2972 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2973 } 2974 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2975 2976 static void __netif_reschedule(struct Qdisc *q) 2977 { 2978 struct softnet_data *sd; 2979 unsigned long flags; 2980 2981 local_irq_save(flags); 2982 sd = this_cpu_ptr(&softnet_data); 2983 q->next_sched = NULL; 2984 *sd->output_queue_tailp = q; 2985 sd->output_queue_tailp = &q->next_sched; 2986 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2987 local_irq_restore(flags); 2988 } 2989 2990 void __netif_schedule(struct Qdisc *q) 2991 { 2992 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2993 __netif_reschedule(q); 2994 } 2995 EXPORT_SYMBOL(__netif_schedule); 2996 2997 struct dev_kfree_skb_cb { 2998 enum skb_free_reason reason; 2999 }; 3000 3001 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3002 { 3003 return (struct dev_kfree_skb_cb *)skb->cb; 3004 } 3005 3006 void netif_schedule_queue(struct netdev_queue *txq) 3007 { 3008 rcu_read_lock(); 3009 if (!netif_xmit_stopped(txq)) { 3010 struct Qdisc *q = rcu_dereference(txq->qdisc); 3011 3012 __netif_schedule(q); 3013 } 3014 rcu_read_unlock(); 3015 } 3016 EXPORT_SYMBOL(netif_schedule_queue); 3017 3018 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3019 { 3020 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3021 struct Qdisc *q; 3022 3023 rcu_read_lock(); 3024 q = rcu_dereference(dev_queue->qdisc); 3025 __netif_schedule(q); 3026 rcu_read_unlock(); 3027 } 3028 } 3029 EXPORT_SYMBOL(netif_tx_wake_queue); 3030 3031 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 3032 { 3033 unsigned long flags; 3034 3035 if (unlikely(!skb)) 3036 return; 3037 3038 if (likely(refcount_read(&skb->users) == 1)) { 3039 smp_rmb(); 3040 refcount_set(&skb->users, 0); 3041 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3042 return; 3043 } 3044 get_kfree_skb_cb(skb)->reason = reason; 3045 local_irq_save(flags); 3046 skb->next = __this_cpu_read(softnet_data.completion_queue); 3047 __this_cpu_write(softnet_data.completion_queue, skb); 3048 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3049 local_irq_restore(flags); 3050 } 3051 
EXPORT_SYMBOL(__dev_kfree_skb_irq); 3052 3053 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 3054 { 3055 if (in_hardirq() || irqs_disabled()) 3056 __dev_kfree_skb_irq(skb, reason); 3057 else 3058 dev_kfree_skb(skb); 3059 } 3060 EXPORT_SYMBOL(__dev_kfree_skb_any); 3061 3062 3063 /** 3064 * netif_device_detach - mark device as removed 3065 * @dev: network device 3066 * 3067 * Mark device as removed from system and therefore no longer available. 3068 */ 3069 void netif_device_detach(struct net_device *dev) 3070 { 3071 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3072 netif_running(dev)) { 3073 netif_tx_stop_all_queues(dev); 3074 } 3075 } 3076 EXPORT_SYMBOL(netif_device_detach); 3077 3078 /** 3079 * netif_device_attach - mark device as attached 3080 * @dev: network device 3081 * 3082 * Mark device as attached from system and restart if needed. 3083 */ 3084 void netif_device_attach(struct net_device *dev) 3085 { 3086 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3087 netif_running(dev)) { 3088 netif_tx_wake_all_queues(dev); 3089 __netdev_watchdog_up(dev); 3090 } 3091 } 3092 EXPORT_SYMBOL(netif_device_attach); 3093 3094 /* 3095 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3096 * to be used as a distribution range. 3097 */ 3098 static u16 skb_tx_hash(const struct net_device *dev, 3099 const struct net_device *sb_dev, 3100 struct sk_buff *skb) 3101 { 3102 u32 hash; 3103 u16 qoffset = 0; 3104 u16 qcount = dev->real_num_tx_queues; 3105 3106 if (dev->num_tc) { 3107 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3108 3109 qoffset = sb_dev->tc_to_txq[tc].offset; 3110 qcount = sb_dev->tc_to_txq[tc].count; 3111 if (unlikely(!qcount)) { 3112 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", 3113 sb_dev->name, qoffset, tc); 3114 qoffset = 0; 3115 qcount = dev->real_num_tx_queues; 3116 } 3117 } 3118 3119 if (skb_rx_queue_recorded(skb)) { 3120 hash = skb_get_rx_queue(skb); 3121 if (hash >= qoffset) 3122 hash -= qoffset; 3123 while (unlikely(hash >= qcount)) 3124 hash -= qcount; 3125 return hash + qoffset; 3126 } 3127 3128 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3129 } 3130 3131 static void skb_warn_bad_offload(const struct sk_buff *skb) 3132 { 3133 static const netdev_features_t null_features; 3134 struct net_device *dev = skb->dev; 3135 const char *name = ""; 3136 3137 if (!net_ratelimit()) 3138 return; 3139 3140 if (dev) { 3141 if (dev->dev.parent) 3142 name = dev_driver_string(dev->dev.parent); 3143 else 3144 name = netdev_name(dev); 3145 } 3146 skb_dump(KERN_WARNING, skb, false); 3147 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3148 name, dev ? &dev->features : &null_features, 3149 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3150 } 3151 3152 /* 3153 * Invalidate hardware checksum when packet is to be mangled, and 3154 * complete checksum manually on outgoing path. 3155 */ 3156 int skb_checksum_help(struct sk_buff *skb) 3157 { 3158 __wsum csum; 3159 int ret = 0, offset; 3160 3161 if (skb->ip_summed == CHECKSUM_COMPLETE) 3162 goto out_set_summed; 3163 3164 if (unlikely(skb_is_gso(skb))) { 3165 skb_warn_bad_offload(skb); 3166 return -EINVAL; 3167 } 3168 3169 /* Before computing a checksum, we should make sure no frag could 3170 * be modified by an external entity : checksum could be wrong. 
3171 */ 3172 if (skb_has_shared_frag(skb)) { 3173 ret = __skb_linearize(skb); 3174 if (ret) 3175 goto out; 3176 } 3177 3178 offset = skb_checksum_start_offset(skb); 3179 BUG_ON(offset >= skb_headlen(skb)); 3180 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3181 3182 offset += skb->csum_offset; 3183 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 3184 3185 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3186 if (ret) 3187 goto out; 3188 3189 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3190 out_set_summed: 3191 skb->ip_summed = CHECKSUM_NONE; 3192 out: 3193 return ret; 3194 } 3195 EXPORT_SYMBOL(skb_checksum_help); 3196 3197 int skb_crc32c_csum_help(struct sk_buff *skb) 3198 { 3199 __le32 crc32c_csum; 3200 int ret = 0, offset, start; 3201 3202 if (skb->ip_summed != CHECKSUM_PARTIAL) 3203 goto out; 3204 3205 if (unlikely(skb_is_gso(skb))) 3206 goto out; 3207 3208 /* Before computing a checksum, we should make sure no frag could 3209 * be modified by an external entity : checksum could be wrong. 3210 */ 3211 if (unlikely(skb_has_shared_frag(skb))) { 3212 ret = __skb_linearize(skb); 3213 if (ret) 3214 goto out; 3215 } 3216 start = skb_checksum_start_offset(skb); 3217 offset = start + offsetof(struct sctphdr, checksum); 3218 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3219 ret = -EINVAL; 3220 goto out; 3221 } 3222 3223 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3224 if (ret) 3225 goto out; 3226 3227 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3228 skb->len - start, ~(__u32)0, 3229 crc32c_csum_stub)); 3230 *(__le32 *)(skb->data + offset) = crc32c_csum; 3231 skb->ip_summed = CHECKSUM_NONE; 3232 skb->csum_not_inet = 0; 3233 out: 3234 return ret; 3235 } 3236 3237 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3238 { 3239 __be16 type = skb->protocol; 3240 3241 /* Tunnel gso handlers can set protocol to ethernet. */ 3242 if (type == htons(ETH_P_TEB)) { 3243 struct ethhdr *eth; 3244 3245 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3246 return 0; 3247 3248 eth = (struct ethhdr *)skb->data; 3249 type = eth->h_proto; 3250 } 3251 3252 return __vlan_get_protocol(skb, type, depth); 3253 } 3254 3255 /* openvswitch calls this on rx path, so we need a different check. 3256 */ 3257 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3258 { 3259 if (tx_path) 3260 return skb->ip_summed != CHECKSUM_PARTIAL && 3261 skb->ip_summed != CHECKSUM_UNNECESSARY; 3262 3263 return skb->ip_summed == CHECKSUM_NONE; 3264 } 3265 3266 /** 3267 * __skb_gso_segment - Perform segmentation on skb. 3268 * @skb: buffer to segment 3269 * @features: features for the output path (see dev->features) 3270 * @tx_path: whether it is called in TX path 3271 * 3272 * This function segments the given skb and returns a list of segments. 3273 * 3274 * It may return NULL if the skb requires no segmentation. This is 3275 * only possible when GSO is used for verifying header integrity. 3276 * 3277 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. 
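 *
 * Callers are expected to handle all three outcomes (error, NULL, or a
 * segment list), roughly as validate_xmit_skb() below does (sketch):
 *
 *	segs = __skb_gso_segment(skb, features, true);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}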
3278 */ 3279 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3280 netdev_features_t features, bool tx_path) 3281 { 3282 struct sk_buff *segs; 3283 3284 if (unlikely(skb_needs_check(skb, tx_path))) { 3285 int err; 3286 3287 /* We're going to init ->check field in TCP or UDP header */ 3288 err = skb_cow_head(skb, 0); 3289 if (err < 0) 3290 return ERR_PTR(err); 3291 } 3292 3293 /* Only report GSO partial support if it will enable us to 3294 * support segmentation on this frame without needing additional 3295 * work. 3296 */ 3297 if (features & NETIF_F_GSO_PARTIAL) { 3298 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3299 struct net_device *dev = skb->dev; 3300 3301 partial_features |= dev->features & dev->gso_partial_features; 3302 if (!skb_gso_ok(skb, features | partial_features)) 3303 features &= ~NETIF_F_GSO_PARTIAL; 3304 } 3305 3306 BUILD_BUG_ON(SKB_GSO_CB_OFFSET + 3307 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3308 3309 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3310 SKB_GSO_CB(skb)->encap_level = 0; 3311 3312 skb_reset_mac_header(skb); 3313 skb_reset_mac_len(skb); 3314 3315 segs = skb_mac_gso_segment(skb, features); 3316 3317 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3318 skb_warn_bad_offload(skb); 3319 3320 return segs; 3321 } 3322 EXPORT_SYMBOL(__skb_gso_segment); 3323 3324 /* Take action when hardware reception checksum errors are detected. */ 3325 #ifdef CONFIG_BUG 3326 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3327 { 3328 netdev_err(dev, "hw csum failure\n"); 3329 skb_dump(KERN_ERR, skb, true); 3330 dump_stack(); 3331 } 3332 3333 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3334 { 3335 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3336 } 3337 EXPORT_SYMBOL(netdev_rx_csum_fault); 3338 #endif 3339 3340 /* XXX: check that highmem exists at all on the given machine. */ 3341 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3342 { 3343 #ifdef CONFIG_HIGHMEM 3344 int i; 3345 3346 if (!(dev->features & NETIF_F_HIGHDMA)) { 3347 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3348 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3349 3350 if (PageHighMem(skb_frag_page(frag))) 3351 return 1; 3352 } 3353 } 3354 #endif 3355 return 0; 3356 } 3357 3358 /* If MPLS offload request, verify we are testing hardware MPLS features 3359 * instead of standard features for the netdev. 
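 *
 * Drivers that can offload MPLS-encapsulated traffic advertise that
 * separately via dev->mpls_features, e.g. (sketch, flags illustrative
 * only):
 *
 *	dev->mpls_features = NETIF_F_SG | NETIF_F_HW_CSUM;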
3360 */ 3361 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3362 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3363 netdev_features_t features, 3364 __be16 type) 3365 { 3366 if (eth_p_mpls(type)) 3367 features &= skb->dev->mpls_features; 3368 3369 return features; 3370 } 3371 #else 3372 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3373 netdev_features_t features, 3374 __be16 type) 3375 { 3376 return features; 3377 } 3378 #endif 3379 3380 static netdev_features_t harmonize_features(struct sk_buff *skb, 3381 netdev_features_t features) 3382 { 3383 __be16 type; 3384 3385 type = skb_network_protocol(skb, NULL); 3386 features = net_mpls_features(skb, features, type); 3387 3388 if (skb->ip_summed != CHECKSUM_NONE && 3389 !can_checksum_protocol(features, type)) { 3390 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3391 } 3392 if (illegal_highdma(skb->dev, skb)) 3393 features &= ~NETIF_F_SG; 3394 3395 return features; 3396 } 3397 3398 netdev_features_t passthru_features_check(struct sk_buff *skb, 3399 struct net_device *dev, 3400 netdev_features_t features) 3401 { 3402 return features; 3403 } 3404 EXPORT_SYMBOL(passthru_features_check); 3405 3406 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3407 struct net_device *dev, 3408 netdev_features_t features) 3409 { 3410 return vlan_features_check(skb, features); 3411 } 3412 3413 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3414 struct net_device *dev, 3415 netdev_features_t features) 3416 { 3417 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3418 3419 if (gso_segs > READ_ONCE(dev->gso_max_segs)) 3420 return features & ~NETIF_F_GSO_MASK; 3421 3422 if (!skb_shinfo(skb)->gso_type) { 3423 skb_warn_bad_offload(skb); 3424 return features & ~NETIF_F_GSO_MASK; 3425 } 3426 3427 /* Support for GSO partial features requires software 3428 * intervention before we can actually process the packets 3429 * so we need to strip support for any partial features now 3430 * and we can pull them back in after we have partially 3431 * segmented the frame. 3432 */ 3433 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3434 features &= ~dev->gso_partial_features; 3435 3436 /* Make sure to clear the IPv4 ID mangling feature if the 3437 * IPv4 header has the potential to be fragmented. 3438 */ 3439 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3440 struct iphdr *iph = skb->encapsulation ? 
3441 inner_ip_hdr(skb) : ip_hdr(skb); 3442 3443 if (!(iph->frag_off & htons(IP_DF))) 3444 features &= ~NETIF_F_TSO_MANGLEID; 3445 } 3446 3447 return features; 3448 } 3449 3450 netdev_features_t netif_skb_features(struct sk_buff *skb) 3451 { 3452 struct net_device *dev = skb->dev; 3453 netdev_features_t features = dev->features; 3454 3455 if (skb_is_gso(skb)) 3456 features = gso_features_check(skb, dev, features); 3457 3458 /* If encapsulation offload request, verify we are testing 3459 * hardware encapsulation features instead of standard 3460 * features for the netdev 3461 */ 3462 if (skb->encapsulation) 3463 features &= dev->hw_enc_features; 3464 3465 if (skb_vlan_tagged(skb)) 3466 features = netdev_intersect_features(features, 3467 dev->vlan_features | 3468 NETIF_F_HW_VLAN_CTAG_TX | 3469 NETIF_F_HW_VLAN_STAG_TX); 3470 3471 if (dev->netdev_ops->ndo_features_check) 3472 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3473 features); 3474 else 3475 features &= dflt_features_check(skb, dev, features); 3476 3477 return harmonize_features(skb, features); 3478 } 3479 EXPORT_SYMBOL(netif_skb_features); 3480 3481 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3482 struct netdev_queue *txq, bool more) 3483 { 3484 unsigned int len; 3485 int rc; 3486 3487 if (dev_nit_active(dev)) 3488 dev_queue_xmit_nit(skb, dev); 3489 3490 len = skb->len; 3491 PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies); 3492 trace_net_dev_start_xmit(skb, dev); 3493 rc = netdev_start_xmit(skb, dev, txq, more); 3494 trace_net_dev_xmit(skb, rc, dev, len); 3495 3496 return rc; 3497 } 3498 3499 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3500 struct netdev_queue *txq, int *ret) 3501 { 3502 struct sk_buff *skb = first; 3503 int rc = NETDEV_TX_OK; 3504 3505 while (skb) { 3506 struct sk_buff *next = skb->next; 3507 3508 skb_mark_not_on_list(skb); 3509 rc = xmit_one(skb, dev, txq, next != NULL); 3510 if (unlikely(!dev_xmit_complete(rc))) { 3511 skb->next = next; 3512 goto out; 3513 } 3514 3515 skb = next; 3516 if (netif_tx_queue_stopped(txq) && skb) { 3517 rc = NETDEV_TX_BUSY; 3518 break; 3519 } 3520 } 3521 3522 out: 3523 *ret = rc; 3524 return skb; 3525 } 3526 3527 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3528 netdev_features_t features) 3529 { 3530 if (skb_vlan_tag_present(skb) && 3531 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3532 skb = __vlan_hwaccel_push_inside(skb); 3533 return skb; 3534 } 3535 3536 int skb_csum_hwoffload_help(struct sk_buff *skb, 3537 const netdev_features_t features) 3538 { 3539 if (unlikely(skb_csum_is_sctp(skb))) 3540 return !!(features & NETIF_F_SCTP_CRC) ? 
0 : 3541 skb_crc32c_csum_help(skb); 3542 3543 if (features & NETIF_F_HW_CSUM) 3544 return 0; 3545 3546 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3547 switch (skb->csum_offset) { 3548 case offsetof(struct tcphdr, check): 3549 case offsetof(struct udphdr, check): 3550 return 0; 3551 } 3552 } 3553 3554 return skb_checksum_help(skb); 3555 } 3556 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3557 3558 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3559 { 3560 netdev_features_t features; 3561 3562 features = netif_skb_features(skb); 3563 skb = validate_xmit_vlan(skb, features); 3564 if (unlikely(!skb)) 3565 goto out_null; 3566 3567 skb = sk_validate_xmit_skb(skb, dev); 3568 if (unlikely(!skb)) 3569 goto out_null; 3570 3571 if (netif_needs_gso(skb, features)) { 3572 struct sk_buff *segs; 3573 3574 segs = skb_gso_segment(skb, features); 3575 if (IS_ERR(segs)) { 3576 goto out_kfree_skb; 3577 } else if (segs) { 3578 consume_skb(skb); 3579 skb = segs; 3580 } 3581 } else { 3582 if (skb_needs_linearize(skb, features) && 3583 __skb_linearize(skb)) 3584 goto out_kfree_skb; 3585 3586 /* If packet is not checksummed and device does not 3587 * support checksumming for this protocol, complete 3588 * checksumming here. 3589 */ 3590 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3591 if (skb->encapsulation) 3592 skb_set_inner_transport_header(skb, 3593 skb_checksum_start_offset(skb)); 3594 else 3595 skb_set_transport_header(skb, 3596 skb_checksum_start_offset(skb)); 3597 if (skb_csum_hwoffload_help(skb, features)) 3598 goto out_kfree_skb; 3599 } 3600 } 3601 3602 skb = validate_xmit_xfrm(skb, features, again); 3603 3604 return skb; 3605 3606 out_kfree_skb: 3607 kfree_skb(skb); 3608 out_null: 3609 atomic_long_inc(&dev->tx_dropped); 3610 return NULL; 3611 } 3612 3613 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3614 { 3615 struct sk_buff *next, *head = NULL, *tail; 3616 3617 for (; skb != NULL; skb = next) { 3618 next = skb->next; 3619 skb_mark_not_on_list(skb); 3620 3621 /* in case skb wont be segmented, point to itself */ 3622 skb->prev = skb; 3623 3624 skb = validate_xmit_skb(skb, dev, again); 3625 if (!skb) 3626 continue; 3627 3628 if (!head) 3629 head = skb; 3630 else 3631 tail->next = skb; 3632 /* If skb was segmented, skb->prev points to 3633 * the last segment. If not, it still contains skb. 
3634 */ 3635 tail = skb->prev; 3636 } 3637 return head; 3638 } 3639 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3640 3641 static void qdisc_pkt_len_init(struct sk_buff *skb) 3642 { 3643 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3644 3645 qdisc_skb_cb(skb)->pkt_len = skb->len; 3646 3647 /* To get more precise estimation of bytes sent on wire, 3648 * we add to pkt_len the headers size of all segments 3649 */ 3650 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3651 unsigned int hdr_len; 3652 u16 gso_segs = shinfo->gso_segs; 3653 3654 /* mac layer + network layer */ 3655 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3656 3657 /* + transport layer */ 3658 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3659 const struct tcphdr *th; 3660 struct tcphdr _tcphdr; 3661 3662 th = skb_header_pointer(skb, skb_transport_offset(skb), 3663 sizeof(_tcphdr), &_tcphdr); 3664 if (likely(th)) 3665 hdr_len += __tcp_hdrlen(th); 3666 } else { 3667 struct udphdr _udphdr; 3668 3669 if (skb_header_pointer(skb, skb_transport_offset(skb), 3670 sizeof(_udphdr), &_udphdr)) 3671 hdr_len += sizeof(struct udphdr); 3672 } 3673 3674 if (shinfo->gso_type & SKB_GSO_DODGY) 3675 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3676 shinfo->gso_size); 3677 3678 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3679 } 3680 } 3681 3682 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3683 struct sk_buff **to_free, 3684 struct netdev_queue *txq) 3685 { 3686 int rc; 3687 3688 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3689 if (rc == NET_XMIT_SUCCESS) 3690 trace_qdisc_enqueue(q, txq, skb); 3691 return rc; 3692 } 3693 3694 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3695 struct net_device *dev, 3696 struct netdev_queue *txq) 3697 { 3698 spinlock_t *root_lock = qdisc_lock(q); 3699 struct sk_buff *to_free = NULL; 3700 bool contended; 3701 int rc; 3702 3703 qdisc_calculate_pkt_len(skb, q); 3704 3705 if (q->flags & TCQ_F_NOLOCK) { 3706 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3707 qdisc_run_begin(q)) { 3708 /* Retest nolock_qdisc_is_empty() within the protection 3709 * of q->seqlock to protect from racing with requeuing. 3710 */ 3711 if (unlikely(!nolock_qdisc_is_empty(q))) { 3712 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3713 __qdisc_run(q); 3714 qdisc_run_end(q); 3715 3716 goto no_lock_out; 3717 } 3718 3719 qdisc_bstats_cpu_update(q, skb); 3720 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3721 !nolock_qdisc_is_empty(q)) 3722 __qdisc_run(q); 3723 3724 qdisc_run_end(q); 3725 return NET_XMIT_SUCCESS; 3726 } 3727 3728 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3729 qdisc_run(q); 3730 3731 no_lock_out: 3732 if (unlikely(to_free)) 3733 kfree_skb_list(to_free); 3734 return rc; 3735 } 3736 3737 /* 3738 * Heuristic to force contended enqueues to serialize on a 3739 * separate lock before trying to get qdisc main lock. 3740 * This permits qdisc->running owner to get the lock more 3741 * often and dequeue packets faster. 3742 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit 3743 * and then other tasks will only enqueue packets. The packets will be 3744 * sent after the qdisc owner is scheduled again. To prevent this 3745 * scenario the task always serialize on the lock. 
3746 */ 3747 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); 3748 if (unlikely(contended)) 3749 spin_lock(&q->busylock); 3750 3751 spin_lock(root_lock); 3752 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3753 __qdisc_drop(skb, &to_free); 3754 rc = NET_XMIT_DROP; 3755 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3756 qdisc_run_begin(q)) { 3757 /* 3758 * This is a work-conserving queue; there are no old skbs 3759 * waiting to be sent out; and the qdisc is not running - 3760 * xmit the skb directly. 3761 */ 3762 3763 qdisc_bstats_update(q, skb); 3764 3765 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3766 if (unlikely(contended)) { 3767 spin_unlock(&q->busylock); 3768 contended = false; 3769 } 3770 __qdisc_run(q); 3771 } 3772 3773 qdisc_run_end(q); 3774 rc = NET_XMIT_SUCCESS; 3775 } else { 3776 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3777 if (qdisc_run_begin(q)) { 3778 if (unlikely(contended)) { 3779 spin_unlock(&q->busylock); 3780 contended = false; 3781 } 3782 __qdisc_run(q); 3783 qdisc_run_end(q); 3784 } 3785 } 3786 spin_unlock(root_lock); 3787 if (unlikely(to_free)) 3788 kfree_skb_list(to_free); 3789 if (unlikely(contended)) 3790 spin_unlock(&q->busylock); 3791 return rc; 3792 } 3793 3794 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3795 static void skb_update_prio(struct sk_buff *skb) 3796 { 3797 const struct netprio_map *map; 3798 const struct sock *sk; 3799 unsigned int prioidx; 3800 3801 if (skb->priority) 3802 return; 3803 map = rcu_dereference_bh(skb->dev->priomap); 3804 if (!map) 3805 return; 3806 sk = skb_to_full_sk(skb); 3807 if (!sk) 3808 return; 3809 3810 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3811 3812 if (prioidx < map->priomap_len) 3813 skb->priority = map->priomap[prioidx]; 3814 } 3815 #else 3816 #define skb_update_prio(skb) 3817 #endif 3818 3819 /** 3820 * dev_loopback_xmit - loop back @skb 3821 * @net: network namespace this loopback is happening in 3822 * @sk: sk needed to be a netfilter okfn 3823 * @skb: buffer to transmit 3824 */ 3825 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3826 { 3827 skb_reset_mac_header(skb); 3828 __skb_pull(skb, skb_network_offset(skb)); 3829 skb->pkt_type = PACKET_LOOPBACK; 3830 if (skb->ip_summed == CHECKSUM_NONE) 3831 skb->ip_summed = CHECKSUM_UNNECESSARY; 3832 WARN_ON(!skb_dst(skb)); 3833 skb_dst_force(skb); 3834 netif_rx_ni(skb); 3835 return 0; 3836 } 3837 EXPORT_SYMBOL(dev_loopback_xmit); 3838 3839 #ifdef CONFIG_NET_EGRESS 3840 static struct sk_buff * 3841 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3842 { 3843 #ifdef CONFIG_NET_CLS_ACT 3844 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3845 struct tcf_result cl_res; 3846 3847 if (!miniq) 3848 return skb; 3849 3850 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. 
*/ 3851 tc_skb_cb(skb)->mru = 0; 3852 tc_skb_cb(skb)->post_ct = false; 3853 mini_qdisc_bstats_cpu_update(miniq, skb); 3854 3855 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 3856 case TC_ACT_OK: 3857 case TC_ACT_RECLASSIFY: 3858 skb->tc_index = TC_H_MIN(cl_res.classid); 3859 break; 3860 case TC_ACT_SHOT: 3861 mini_qdisc_qstats_cpu_drop(miniq); 3862 *ret = NET_XMIT_DROP; 3863 kfree_skb(skb); 3864 return NULL; 3865 case TC_ACT_STOLEN: 3866 case TC_ACT_QUEUED: 3867 case TC_ACT_TRAP: 3868 *ret = NET_XMIT_SUCCESS; 3869 consume_skb(skb); 3870 return NULL; 3871 case TC_ACT_REDIRECT: 3872 /* No need to push/pop skb's mac_header here on egress! */ 3873 skb_do_redirect(skb); 3874 *ret = NET_XMIT_SUCCESS; 3875 return NULL; 3876 default: 3877 break; 3878 } 3879 #endif /* CONFIG_NET_CLS_ACT */ 3880 3881 return skb; 3882 } 3883 #endif /* CONFIG_NET_EGRESS */ 3884 3885 #ifdef CONFIG_XPS 3886 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3887 struct xps_dev_maps *dev_maps, unsigned int tci) 3888 { 3889 int tc = netdev_get_prio_tc_map(dev, skb->priority); 3890 struct xps_map *map; 3891 int queue_index = -1; 3892 3893 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 3894 return queue_index; 3895 3896 tci *= dev_maps->num_tc; 3897 tci += tc; 3898 3899 map = rcu_dereference(dev_maps->attr_map[tci]); 3900 if (map) { 3901 if (map->len == 1) 3902 queue_index = map->queues[0]; 3903 else 3904 queue_index = map->queues[reciprocal_scale( 3905 skb_get_hash(skb), map->len)]; 3906 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3907 queue_index = -1; 3908 } 3909 return queue_index; 3910 } 3911 #endif 3912 3913 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3914 struct sk_buff *skb) 3915 { 3916 #ifdef CONFIG_XPS 3917 struct xps_dev_maps *dev_maps; 3918 struct sock *sk = skb->sk; 3919 int queue_index = -1; 3920 3921 if (!static_key_false(&xps_needed)) 3922 return -1; 3923 3924 rcu_read_lock(); 3925 if (!static_key_false(&xps_rxqs_needed)) 3926 goto get_cpus_map; 3927 3928 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 3929 if (dev_maps) { 3930 int tci = sk_rx_queue_get(sk); 3931 3932 if (tci >= 0) 3933 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3934 tci); 3935 } 3936 3937 get_cpus_map: 3938 if (queue_index < 0) { 3939 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 3940 if (dev_maps) { 3941 unsigned int tci = skb->sender_cpu - 1; 3942 3943 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3944 tci); 3945 } 3946 } 3947 rcu_read_unlock(); 3948 3949 return queue_index; 3950 #else 3951 return -1; 3952 #endif 3953 } 3954 3955 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3956 struct net_device *sb_dev) 3957 { 3958 return 0; 3959 } 3960 EXPORT_SYMBOL(dev_pick_tx_zero); 3961 3962 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3963 struct net_device *sb_dev) 3964 { 3965 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 3966 } 3967 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 3968 3969 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 3970 struct net_device *sb_dev) 3971 { 3972 struct sock *sk = skb->sk; 3973 int queue_index = sk_tx_queue_get(sk); 3974 3975 sb_dev = sb_dev ? 
: dev; 3976 3977 if (queue_index < 0 || skb->ooo_okay || 3978 queue_index >= dev->real_num_tx_queues) { 3979 int new_index = get_xps_queue(dev, sb_dev, skb); 3980 3981 if (new_index < 0) 3982 new_index = skb_tx_hash(dev, sb_dev, skb); 3983 3984 if (queue_index != new_index && sk && 3985 sk_fullsock(sk) && 3986 rcu_access_pointer(sk->sk_dst_cache)) 3987 sk_tx_queue_set(sk, new_index); 3988 3989 queue_index = new_index; 3990 } 3991 3992 return queue_index; 3993 } 3994 EXPORT_SYMBOL(netdev_pick_tx); 3995 3996 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 3997 struct sk_buff *skb, 3998 struct net_device *sb_dev) 3999 { 4000 int queue_index = 0; 4001 4002 #ifdef CONFIG_XPS 4003 u32 sender_cpu = skb->sender_cpu - 1; 4004 4005 if (sender_cpu >= (u32)NR_CPUS) 4006 skb->sender_cpu = raw_smp_processor_id() + 1; 4007 #endif 4008 4009 if (dev->real_num_tx_queues != 1) { 4010 const struct net_device_ops *ops = dev->netdev_ops; 4011 4012 if (ops->ndo_select_queue) 4013 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4014 else 4015 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4016 4017 queue_index = netdev_cap_txqueue(dev, queue_index); 4018 } 4019 4020 skb_set_queue_mapping(skb, queue_index); 4021 return netdev_get_tx_queue(dev, queue_index); 4022 } 4023 4024 /** 4025 * __dev_queue_xmit - transmit a buffer 4026 * @skb: buffer to transmit 4027 * @sb_dev: suboordinate device used for L2 forwarding offload 4028 * 4029 * Queue a buffer for transmission to a network device. The caller must 4030 * have set the device and priority and built the buffer before calling 4031 * this function. The function can be called from an interrupt. 4032 * 4033 * A negative errno code is returned on a failure. A success does not 4034 * guarantee the frame will be transmitted as it may be dropped due 4035 * to congestion or traffic shaping. 4036 * 4037 * ----------------------------------------------------------------------------------- 4038 * I notice this method can also return errors from the queue disciplines, 4039 * including NET_XMIT_DROP, which is a positive value. So, errors can also 4040 * be positive. 4041 * 4042 * Regardless of the return value, the skb is consumed, so it is currently 4043 * difficult to retry a send to this method. (You can bump the ref count 4044 * before sending to hold a reference for retry if you are careful.) 4045 * 4046 * When calling this method, interrupts MUST be enabled. This is because 4047 * the BH enable code must have IRQs enabled so that it will not deadlock. 4048 * --BLG 4049 */ 4050 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 4051 { 4052 struct net_device *dev = skb->dev; 4053 struct netdev_queue *txq; 4054 struct Qdisc *q; 4055 int rc = -ENOMEM; 4056 bool again = false; 4057 4058 skb_reset_mac_header(skb); 4059 4060 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 4061 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); 4062 4063 /* Disable soft irqs for various locks below. Also 4064 * stops preemption for RCU. 
4065 */ 4066 rcu_read_lock_bh(); 4067 4068 skb_update_prio(skb); 4069 4070 qdisc_pkt_len_init(skb); 4071 #ifdef CONFIG_NET_CLS_ACT 4072 skb->tc_at_ingress = 0; 4073 #endif 4074 #ifdef CONFIG_NET_EGRESS 4075 if (static_branch_unlikely(&egress_needed_key)) { 4076 if (nf_hook_egress_active()) { 4077 skb = nf_hook_egress(skb, &rc, dev); 4078 if (!skb) 4079 goto out; 4080 } 4081 nf_skip_egress(skb, true); 4082 skb = sch_handle_egress(skb, &rc, dev); 4083 if (!skb) 4084 goto out; 4085 nf_skip_egress(skb, false); 4086 } 4087 #endif 4088 /* If device/qdisc don't need skb->dst, release it right now while 4089 * its hot in this cpu cache. 4090 */ 4091 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 4092 skb_dst_drop(skb); 4093 else 4094 skb_dst_force(skb); 4095 4096 txq = netdev_core_pick_tx(dev, skb, sb_dev); 4097 q = rcu_dereference_bh(txq->qdisc); 4098 4099 trace_net_dev_queue(skb); 4100 if (q->enqueue) { 4101 rc = __dev_xmit_skb(skb, q, dev, txq); 4102 goto out; 4103 } 4104 4105 /* The device has no queue. Common case for software devices: 4106 * loopback, all the sorts of tunnels... 4107 4108 * Really, it is unlikely that netif_tx_lock protection is necessary 4109 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 4110 * counters.) 4111 * However, it is possible, that they rely on protection 4112 * made by us here. 4113 4114 * Check this and shot the lock. It is not prone from deadlocks. 4115 *Either shot noqueue qdisc, it is even simpler 8) 4116 */ 4117 if (dev->flags & IFF_UP) { 4118 int cpu = smp_processor_id(); /* ok because BHs are off */ 4119 4120 /* Other cpus might concurrently change txq->xmit_lock_owner 4121 * to -1 or to their cpu id, but not to our id. 4122 */ 4123 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { 4124 if (dev_xmit_recursion()) 4125 goto recursion_alert; 4126 4127 skb = validate_xmit_skb(skb, dev, &again); 4128 if (!skb) 4129 goto out; 4130 4131 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); 4132 HARD_TX_LOCK(dev, txq, cpu); 4133 4134 if (!netif_xmit_stopped(txq)) { 4135 dev_xmit_recursion_inc(); 4136 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4137 dev_xmit_recursion_dec(); 4138 if (dev_xmit_complete(rc)) { 4139 HARD_TX_UNLOCK(dev, txq); 4140 goto out; 4141 } 4142 } 4143 HARD_TX_UNLOCK(dev, txq); 4144 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4145 dev->name); 4146 } else { 4147 /* Recursion is detected! 
It is possible, 4148 * unfortunately 4149 */ 4150 recursion_alert: 4151 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4152 dev->name); 4153 } 4154 } 4155 4156 rc = -ENETDOWN; 4157 rcu_read_unlock_bh(); 4158 4159 atomic_long_inc(&dev->tx_dropped); 4160 kfree_skb_list(skb); 4161 return rc; 4162 out: 4163 rcu_read_unlock_bh(); 4164 return rc; 4165 } 4166 4167 int dev_queue_xmit(struct sk_buff *skb) 4168 { 4169 return __dev_queue_xmit(skb, NULL); 4170 } 4171 EXPORT_SYMBOL(dev_queue_xmit); 4172 4173 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) 4174 { 4175 return __dev_queue_xmit(skb, sb_dev); 4176 } 4177 EXPORT_SYMBOL(dev_queue_xmit_accel); 4178 4179 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4180 { 4181 struct net_device *dev = skb->dev; 4182 struct sk_buff *orig_skb = skb; 4183 struct netdev_queue *txq; 4184 int ret = NETDEV_TX_BUSY; 4185 bool again = false; 4186 4187 if (unlikely(!netif_running(dev) || 4188 !netif_carrier_ok(dev))) 4189 goto drop; 4190 4191 skb = validate_xmit_skb_list(skb, dev, &again); 4192 if (skb != orig_skb) 4193 goto drop; 4194 4195 skb_set_queue_mapping(skb, queue_id); 4196 txq = skb_get_tx_queue(dev, skb); 4197 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); 4198 4199 local_bh_disable(); 4200 4201 dev_xmit_recursion_inc(); 4202 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4203 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4204 ret = netdev_start_xmit(skb, dev, txq, false); 4205 HARD_TX_UNLOCK(dev, txq); 4206 dev_xmit_recursion_dec(); 4207 4208 local_bh_enable(); 4209 return ret; 4210 drop: 4211 atomic_long_inc(&dev->tx_dropped); 4212 kfree_skb_list(skb); 4213 return NET_XMIT_DROP; 4214 } 4215 EXPORT_SYMBOL(__dev_direct_xmit); 4216 4217 /************************************************************************* 4218 * Receiver routines 4219 *************************************************************************/ 4220 4221 int netdev_max_backlog __read_mostly = 1000; 4222 EXPORT_SYMBOL(netdev_max_backlog); 4223 4224 int netdev_tstamp_prequeue __read_mostly = 1; 4225 int netdev_budget __read_mostly = 300; 4226 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4227 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4228 int weight_p __read_mostly = 64; /* old backlog weight */ 4229 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4230 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4231 int dev_rx_weight __read_mostly = 64; 4232 int dev_tx_weight __read_mostly = 64; 4233 4234 /* Called with irq disabled */ 4235 static inline void ____napi_schedule(struct softnet_data *sd, 4236 struct napi_struct *napi) 4237 { 4238 struct task_struct *thread; 4239 4240 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4241 /* Paired with smp_mb__before_atomic() in 4242 * napi_enable()/dev_set_threaded(). 4243 * Use READ_ONCE() to guarantee a complete 4244 * read on napi->thread. Only call 4245 * wake_up_process() when it's not NULL. 4246 */ 4247 thread = READ_ONCE(napi->thread); 4248 if (thread) { 4249 /* Avoid doing set_bit() if the thread is in 4250 * INTERRUPTIBLE state, cause napi_thread_wait() 4251 * makes sure to proceed with napi polling 4252 * if the thread is explicitly woken from here. 
4253 */ 4254 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE) 4255 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4256 wake_up_process(thread); 4257 return; 4258 } 4259 } 4260 4261 list_add_tail(&napi->poll_list, &sd->poll_list); 4262 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4263 } 4264 4265 #ifdef CONFIG_RPS 4266 4267 /* One global table that all flow-based protocols share. */ 4268 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4269 EXPORT_SYMBOL(rps_sock_flow_table); 4270 u32 rps_cpu_mask __read_mostly; 4271 EXPORT_SYMBOL(rps_cpu_mask); 4272 4273 struct static_key_false rps_needed __read_mostly; 4274 EXPORT_SYMBOL(rps_needed); 4275 struct static_key_false rfs_needed __read_mostly; 4276 EXPORT_SYMBOL(rfs_needed); 4277 4278 static struct rps_dev_flow * 4279 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4280 struct rps_dev_flow *rflow, u16 next_cpu) 4281 { 4282 if (next_cpu < nr_cpu_ids) { 4283 #ifdef CONFIG_RFS_ACCEL 4284 struct netdev_rx_queue *rxqueue; 4285 struct rps_dev_flow_table *flow_table; 4286 struct rps_dev_flow *old_rflow; 4287 u32 flow_id; 4288 u16 rxq_index; 4289 int rc; 4290 4291 /* Should we steer this flow to a different hardware queue? */ 4292 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4293 !(dev->features & NETIF_F_NTUPLE)) 4294 goto out; 4295 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4296 if (rxq_index == skb_get_rx_queue(skb)) 4297 goto out; 4298 4299 rxqueue = dev->_rx + rxq_index; 4300 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4301 if (!flow_table) 4302 goto out; 4303 flow_id = skb_get_hash(skb) & flow_table->mask; 4304 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4305 rxq_index, flow_id); 4306 if (rc < 0) 4307 goto out; 4308 old_rflow = rflow; 4309 rflow = &flow_table->flows[flow_id]; 4310 rflow->filter = rc; 4311 if (old_rflow->filter == rflow->filter) 4312 old_rflow->filter = RPS_NO_FILTER; 4313 out: 4314 #endif 4315 rflow->last_qtail = 4316 per_cpu(softnet_data, next_cpu).input_queue_head; 4317 } 4318 4319 rflow->cpu = next_cpu; 4320 return rflow; 4321 } 4322 4323 /* 4324 * get_rps_cpu is called from netif_receive_skb and returns the target 4325 * CPU from the RPS map of the receiving queue for a given skb. 4326 * rcu_read_lock must be held on entry. 
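 *
 * The sock flow table entries are written by rps_record_sock_flow() as,
 * roughly, ents[hash & mask] = (hash & ~rps_cpu_mask) | current_cpu, so
 * the upper bits of an entry identify the flow and the low bits the CPU
 * that last ran recvmsg() for it. For example, with rps_cpu_mask == 0x3f
 * a hash of 0x12345678 recorded on CPU 5 is stored as 0x12345645; the
 * "(ident ^ hash) & ~rps_cpu_mask" test below then rejects entries whose
 * flow bits do not match the hash of the packet being steered.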
4327 */ 4328 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4329 struct rps_dev_flow **rflowp) 4330 { 4331 const struct rps_sock_flow_table *sock_flow_table; 4332 struct netdev_rx_queue *rxqueue = dev->_rx; 4333 struct rps_dev_flow_table *flow_table; 4334 struct rps_map *map; 4335 int cpu = -1; 4336 u32 tcpu; 4337 u32 hash; 4338 4339 if (skb_rx_queue_recorded(skb)) { 4340 u16 index = skb_get_rx_queue(skb); 4341 4342 if (unlikely(index >= dev->real_num_rx_queues)) { 4343 WARN_ONCE(dev->real_num_rx_queues > 1, 4344 "%s received packet on queue %u, but number " 4345 "of RX queues is %u\n", 4346 dev->name, index, dev->real_num_rx_queues); 4347 goto done; 4348 } 4349 rxqueue += index; 4350 } 4351 4352 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4353 4354 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4355 map = rcu_dereference(rxqueue->rps_map); 4356 if (!flow_table && !map) 4357 goto done; 4358 4359 skb_reset_network_header(skb); 4360 hash = skb_get_hash(skb); 4361 if (!hash) 4362 goto done; 4363 4364 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4365 if (flow_table && sock_flow_table) { 4366 struct rps_dev_flow *rflow; 4367 u32 next_cpu; 4368 u32 ident; 4369 4370 /* First check into global flow table if there is a match */ 4371 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4372 if ((ident ^ hash) & ~rps_cpu_mask) 4373 goto try_rps; 4374 4375 next_cpu = ident & rps_cpu_mask; 4376 4377 /* OK, now we know there is a match, 4378 * we can look at the local (per receive queue) flow table 4379 */ 4380 rflow = &flow_table->flows[hash & flow_table->mask]; 4381 tcpu = rflow->cpu; 4382 4383 /* 4384 * If the desired CPU (where last recvmsg was done) is 4385 * different from current CPU (one in the rx-queue flow 4386 * table entry), switch if one of the following holds: 4387 * - Current CPU is unset (>= nr_cpu_ids). 4388 * - Current CPU is offline. 4389 * - The current CPU's queue tail has advanced beyond the 4390 * last packet that was enqueued using this table entry. 4391 * This guarantees that all previous packets for the flow 4392 * have been dequeued, thus preserving in order delivery. 4393 */ 4394 if (unlikely(tcpu != next_cpu) && 4395 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4396 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4397 rflow->last_qtail)) >= 0)) { 4398 tcpu = next_cpu; 4399 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4400 } 4401 4402 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4403 *rflowp = rflow; 4404 cpu = tcpu; 4405 goto done; 4406 } 4407 } 4408 4409 try_rps: 4410 4411 if (map) { 4412 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4413 if (cpu_online(tcpu)) { 4414 cpu = tcpu; 4415 goto done; 4416 } 4417 } 4418 4419 done: 4420 return cpu; 4421 } 4422 4423 #ifdef CONFIG_RFS_ACCEL 4424 4425 /** 4426 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4427 * @dev: Device on which the filter was set 4428 * @rxq_index: RX queue index 4429 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4430 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4431 * 4432 * Drivers that implement ndo_rx_flow_steer() should periodically call 4433 * this function for each installed filter and remove the filters for 4434 * which it returns %true. 
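 *
 * Illustrative sketch only (the foo_* filter bookkeeping is hypothetical,
 * e.g. run from a periodic workqueue item in the driver):
 *
 *	for (i = 0; i < priv->num_rfs_filters; i++) {
 *		struct foo_rfs_filter *f = &priv->rfs_filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id)) {
 *			foo_remove_hw_filter(priv, f);
 *			f->installed = false;
 *		}
 *	}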
4435 */ 4436 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4437 u32 flow_id, u16 filter_id) 4438 { 4439 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4440 struct rps_dev_flow_table *flow_table; 4441 struct rps_dev_flow *rflow; 4442 bool expire = true; 4443 unsigned int cpu; 4444 4445 rcu_read_lock(); 4446 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4447 if (flow_table && flow_id <= flow_table->mask) { 4448 rflow = &flow_table->flows[flow_id]; 4449 cpu = READ_ONCE(rflow->cpu); 4450 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4451 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4452 rflow->last_qtail) < 4453 (int)(10 * flow_table->mask))) 4454 expire = false; 4455 } 4456 rcu_read_unlock(); 4457 return expire; 4458 } 4459 EXPORT_SYMBOL(rps_may_expire_flow); 4460 4461 #endif /* CONFIG_RFS_ACCEL */ 4462 4463 /* Called from hardirq (IPI) context */ 4464 static void rps_trigger_softirq(void *data) 4465 { 4466 struct softnet_data *sd = data; 4467 4468 ____napi_schedule(sd, &sd->backlog); 4469 sd->received_rps++; 4470 } 4471 4472 #endif /* CONFIG_RPS */ 4473 4474 /* 4475 * Check if this softnet_data structure is another cpu one 4476 * If yes, queue it to our IPI list and return 1 4477 * If no, return 0 4478 */ 4479 static int napi_schedule_rps(struct softnet_data *sd) 4480 { 4481 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4482 4483 #ifdef CONFIG_RPS 4484 if (sd != mysd) { 4485 sd->rps_ipi_next = mysd->rps_ipi_list; 4486 mysd->rps_ipi_list = sd; 4487 4488 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4489 return 1; 4490 } 4491 #endif /* CONFIG_RPS */ 4492 __napi_schedule_irqoff(&mysd->backlog); 4493 return 0; 4494 } 4495 4496 #ifdef CONFIG_NET_FLOW_LIMIT 4497 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4498 #endif 4499 4500 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4501 { 4502 #ifdef CONFIG_NET_FLOW_LIMIT 4503 struct sd_flow_limit *fl; 4504 struct softnet_data *sd; 4505 unsigned int old_flow, new_flow; 4506 4507 if (qlen < (netdev_max_backlog >> 1)) 4508 return false; 4509 4510 sd = this_cpu_ptr(&softnet_data); 4511 4512 rcu_read_lock(); 4513 fl = rcu_dereference(sd->flow_limit); 4514 if (fl) { 4515 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4516 old_flow = fl->history[fl->history_head]; 4517 fl->history[fl->history_head] = new_flow; 4518 4519 fl->history_head++; 4520 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4521 4522 if (likely(fl->buckets[old_flow])) 4523 fl->buckets[old_flow]--; 4524 4525 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4526 fl->count++; 4527 rcu_read_unlock(); 4528 return true; 4529 } 4530 } 4531 rcu_read_unlock(); 4532 #endif 4533 return false; 4534 } 4535 4536 /* 4537 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4538 * queue (may be a remote CPU queue). 
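 * The queue length is bounded by netdev_max_backlog (sysctl
 * net.core.netdev_max_backlog) and, with CONFIG_NET_FLOW_LIMIT, by
 * skb_flow_limit(), which starts dropping packets of dominant flows once
 * the queue is at least half full. Rejected packets bump sd->dropped and
 * the device's rx_dropped counter.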
4539 */ 4540 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4541 unsigned int *qtail) 4542 { 4543 struct softnet_data *sd; 4544 unsigned long flags; 4545 unsigned int qlen; 4546 4547 sd = &per_cpu(softnet_data, cpu); 4548 4549 rps_lock_irqsave(sd, &flags); 4550 if (!netif_running(skb->dev)) 4551 goto drop; 4552 qlen = skb_queue_len(&sd->input_pkt_queue); 4553 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4554 if (qlen) { 4555 enqueue: 4556 __skb_queue_tail(&sd->input_pkt_queue, skb); 4557 input_queue_tail_incr_save(sd, qtail); 4558 rps_unlock_irq_restore(sd, &flags); 4559 return NET_RX_SUCCESS; 4560 } 4561 4562 /* Schedule NAPI for backlog device 4563 * We can use non atomic operation since we own the queue lock 4564 */ 4565 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) 4566 napi_schedule_rps(sd); 4567 goto enqueue; 4568 } 4569 4570 drop: 4571 sd->dropped++; 4572 rps_unlock_irq_restore(sd, &flags); 4573 4574 atomic_long_inc(&skb->dev->rx_dropped); 4575 kfree_skb(skb); 4576 return NET_RX_DROP; 4577 } 4578 4579 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4580 { 4581 struct net_device *dev = skb->dev; 4582 struct netdev_rx_queue *rxqueue; 4583 4584 rxqueue = dev->_rx; 4585 4586 if (skb_rx_queue_recorded(skb)) { 4587 u16 index = skb_get_rx_queue(skb); 4588 4589 if (unlikely(index >= dev->real_num_rx_queues)) { 4590 WARN_ONCE(dev->real_num_rx_queues > 1, 4591 "%s received packet on queue %u, but number " 4592 "of RX queues is %u\n", 4593 dev->name, index, dev->real_num_rx_queues); 4594 4595 return rxqueue; /* Return first rxqueue */ 4596 } 4597 rxqueue += index; 4598 } 4599 return rxqueue; 4600 } 4601 4602 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4603 struct bpf_prog *xdp_prog) 4604 { 4605 void *orig_data, *orig_data_end, *hard_start; 4606 struct netdev_rx_queue *rxqueue; 4607 bool orig_bcast, orig_host; 4608 u32 mac_len, frame_sz; 4609 __be16 orig_eth_type; 4610 struct ethhdr *eth; 4611 u32 metalen, act; 4612 int off; 4613 4614 /* The XDP program wants to see the packet starting at the MAC 4615 * header. 
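 * The buffer handed to the program therefore spans the whole skb head
 * (a rough sketch of the geometry set up below):
 *
 *	hard_start            data                data_end       skb->end
 *	|<- headroom-mac_len ->|<- mac_len+headlen ->|<- tailroom ->|
 *
 * and frame_sz additionally covers the skb_shared_info placed at
 * skb->end, matching the layout native XDP drivers advertise.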
4616 */ 4617 mac_len = skb->data - skb_mac_header(skb); 4618 hard_start = skb->data - skb_headroom(skb); 4619 4620 /* SKB "head" area always have tailroom for skb_shared_info */ 4621 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4622 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4623 4624 rxqueue = netif_get_rxqueue(skb); 4625 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4626 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4627 skb_headlen(skb) + mac_len, true); 4628 4629 orig_data_end = xdp->data_end; 4630 orig_data = xdp->data; 4631 eth = (struct ethhdr *)xdp->data; 4632 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4633 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4634 orig_eth_type = eth->h_proto; 4635 4636 act = bpf_prog_run_xdp(xdp_prog, xdp); 4637 4638 /* check if bpf_xdp_adjust_head was used */ 4639 off = xdp->data - orig_data; 4640 if (off) { 4641 if (off > 0) 4642 __skb_pull(skb, off); 4643 else if (off < 0) 4644 __skb_push(skb, -off); 4645 4646 skb->mac_header += off; 4647 skb_reset_network_header(skb); 4648 } 4649 4650 /* check if bpf_xdp_adjust_tail was used */ 4651 off = xdp->data_end - orig_data_end; 4652 if (off != 0) { 4653 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4654 skb->len += off; /* positive on grow, negative on shrink */ 4655 } 4656 4657 /* check if XDP changed eth hdr such SKB needs update */ 4658 eth = (struct ethhdr *)xdp->data; 4659 if ((orig_eth_type != eth->h_proto) || 4660 (orig_host != ether_addr_equal_64bits(eth->h_dest, 4661 skb->dev->dev_addr)) || 4662 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4663 __skb_push(skb, ETH_HLEN); 4664 skb->pkt_type = PACKET_HOST; 4665 skb->protocol = eth_type_trans(skb, skb->dev); 4666 } 4667 4668 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4669 * before calling us again on redirect path. We do not call do_redirect 4670 * as we leave that up to the caller. 4671 * 4672 * Caller is responsible for managing lifetime of skb (i.e. calling 4673 * kfree_skb in response to actions it cannot handle/XDP_DROP). 4674 */ 4675 switch (act) { 4676 case XDP_REDIRECT: 4677 case XDP_TX: 4678 __skb_push(skb, mac_len); 4679 break; 4680 case XDP_PASS: 4681 metalen = xdp->data - xdp->data_meta; 4682 if (metalen) 4683 skb_metadata_set(skb, metalen); 4684 break; 4685 } 4686 4687 return act; 4688 } 4689 4690 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4691 struct xdp_buff *xdp, 4692 struct bpf_prog *xdp_prog) 4693 { 4694 u32 act = XDP_DROP; 4695 4696 /* Reinjected packets coming from act_mirred or similar should 4697 * not get XDP generic processing. 4698 */ 4699 if (skb_is_redirected(skb)) 4700 return XDP_PASS; 4701 4702 /* XDP packets must be linear and must have sufficient headroom 4703 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4704 * native XDP provides, thus we need to do it here as well. 4705 */ 4706 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4707 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4708 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4709 int troom = skb->tail + skb->data_len - skb->end; 4710 4711 /* In case we have to go down the path and also linearize, 4712 * then lets do the pskb_expand_head() work just once here. 4713 */ 4714 if (pskb_expand_head(skb, 4715 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4716 troom > 0 ? 
troom + 128 : 0, GFP_ATOMIC)) 4717 goto do_drop; 4718 if (skb_linearize(skb)) 4719 goto do_drop; 4720 } 4721 4722 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4723 switch (act) { 4724 case XDP_REDIRECT: 4725 case XDP_TX: 4726 case XDP_PASS: 4727 break; 4728 default: 4729 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act); 4730 fallthrough; 4731 case XDP_ABORTED: 4732 trace_xdp_exception(skb->dev, xdp_prog, act); 4733 fallthrough; 4734 case XDP_DROP: 4735 do_drop: 4736 kfree_skb(skb); 4737 break; 4738 } 4739 4740 return act; 4741 } 4742 4743 /* When doing generic XDP we have to bypass the qdisc layer and the 4744 * network taps in order to match in-driver-XDP behavior. 4745 */ 4746 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4747 { 4748 struct net_device *dev = skb->dev; 4749 struct netdev_queue *txq; 4750 bool free_skb = true; 4751 int cpu, rc; 4752 4753 txq = netdev_core_pick_tx(dev, skb, NULL); 4754 cpu = smp_processor_id(); 4755 HARD_TX_LOCK(dev, txq, cpu); 4756 if (!netif_xmit_stopped(txq)) { 4757 rc = netdev_start_xmit(skb, dev, txq, 0); 4758 if (dev_xmit_complete(rc)) 4759 free_skb = false; 4760 } 4761 HARD_TX_UNLOCK(dev, txq); 4762 if (free_skb) { 4763 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4764 kfree_skb(skb); 4765 } 4766 } 4767 4768 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4769 4770 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4771 { 4772 if (xdp_prog) { 4773 struct xdp_buff xdp; 4774 u32 act; 4775 int err; 4776 4777 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4778 if (act != XDP_PASS) { 4779 switch (act) { 4780 case XDP_REDIRECT: 4781 err = xdp_do_generic_redirect(skb->dev, skb, 4782 &xdp, xdp_prog); 4783 if (err) 4784 goto out_redir; 4785 break; 4786 case XDP_TX: 4787 generic_xdp_tx(skb, xdp_prog); 4788 break; 4789 } 4790 return XDP_DROP; 4791 } 4792 } 4793 return XDP_PASS; 4794 out_redir: 4795 kfree_skb(skb); 4796 return XDP_DROP; 4797 } 4798 EXPORT_SYMBOL_GPL(do_xdp_generic); 4799 4800 static int netif_rx_internal(struct sk_buff *skb) 4801 { 4802 int ret; 4803 4804 net_timestamp_check(netdev_tstamp_prequeue, skb); 4805 4806 trace_netif_rx(skb); 4807 4808 #ifdef CONFIG_RPS 4809 if (static_branch_unlikely(&rps_needed)) { 4810 struct rps_dev_flow voidflow, *rflow = &voidflow; 4811 int cpu; 4812 4813 rcu_read_lock(); 4814 4815 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4816 if (cpu < 0) 4817 cpu = smp_processor_id(); 4818 4819 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4820 4821 rcu_read_unlock(); 4822 } else 4823 #endif 4824 { 4825 unsigned int qtail; 4826 4827 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail); 4828 } 4829 return ret; 4830 } 4831 4832 /** 4833 * __netif_rx - Slightly optimized version of netif_rx 4834 * @skb: buffer to post 4835 * 4836 * This behaves as netif_rx except that it does not disable bottom halves. 4837 * As a result this function may only be invoked from the interrupt context 4838 * (either hard or soft interrupt). 
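 *
 * Illustrative sketch (foo_* names are hypothetical):
 *
 *	static irqreturn_t foo_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_build_rx_skb(dev);
 *
 *		if (skb) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			__netif_rx(skb);
 *		}
 *		return IRQ_HANDLED;
 *	}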
4839 */ 4840 int __netif_rx(struct sk_buff *skb) 4841 { 4842 int ret; 4843 4844 lockdep_assert_once(hardirq_count() | softirq_count()); 4845 4846 trace_netif_rx_entry(skb); 4847 ret = netif_rx_internal(skb); 4848 trace_netif_rx_exit(ret); 4849 return ret; 4850 } 4851 EXPORT_SYMBOL(__netif_rx); 4852 4853 /** 4854 * netif_rx - post buffer to the network code 4855 * @skb: buffer to post 4856 * 4857 * This function receives a packet from a device driver and queues it for 4858 * the upper (protocol) levels to process via the backlog NAPI device. It 4859 * always succeeds. The buffer may be dropped during processing for 4860 * congestion control or by the protocol layers. 4861 * The network buffer is passed via the backlog NAPI device. Modern NIC 4862 * driver should use NAPI and GRO. 4863 * This function can used from any context. 4864 * 4865 * return values: 4866 * NET_RX_SUCCESS (no congestion) 4867 * NET_RX_DROP (packet was dropped) 4868 * 4869 */ 4870 int netif_rx(struct sk_buff *skb) 4871 { 4872 int ret; 4873 4874 local_bh_disable(); 4875 trace_netif_rx_entry(skb); 4876 ret = netif_rx_internal(skb); 4877 trace_netif_rx_exit(ret); 4878 local_bh_enable(); 4879 return ret; 4880 } 4881 EXPORT_SYMBOL(netif_rx); 4882 4883 static __latent_entropy void net_tx_action(struct softirq_action *h) 4884 { 4885 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4886 4887 if (sd->completion_queue) { 4888 struct sk_buff *clist; 4889 4890 local_irq_disable(); 4891 clist = sd->completion_queue; 4892 sd->completion_queue = NULL; 4893 local_irq_enable(); 4894 4895 while (clist) { 4896 struct sk_buff *skb = clist; 4897 4898 clist = clist->next; 4899 4900 WARN_ON(refcount_read(&skb->users)); 4901 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4902 trace_consume_skb(skb); 4903 else 4904 trace_kfree_skb(skb, net_tx_action, 4905 SKB_DROP_REASON_NOT_SPECIFIED); 4906 4907 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4908 __kfree_skb(skb); 4909 else 4910 __kfree_skb_defer(skb); 4911 } 4912 } 4913 4914 if (sd->output_queue) { 4915 struct Qdisc *head; 4916 4917 local_irq_disable(); 4918 head = sd->output_queue; 4919 sd->output_queue = NULL; 4920 sd->output_queue_tailp = &sd->output_queue; 4921 local_irq_enable(); 4922 4923 rcu_read_lock(); 4924 4925 while (head) { 4926 struct Qdisc *q = head; 4927 spinlock_t *root_lock = NULL; 4928 4929 head = head->next_sched; 4930 4931 /* We need to make sure head->next_sched is read 4932 * before clearing __QDISC_STATE_SCHED 4933 */ 4934 smp_mb__before_atomic(); 4935 4936 if (!(q->flags & TCQ_F_NOLOCK)) { 4937 root_lock = qdisc_lock(q); 4938 spin_lock(root_lock); 4939 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, 4940 &q->state))) { 4941 /* There is a synchronize_net() between 4942 * STATE_DEACTIVATED flag being set and 4943 * qdisc_reset()/some_qdisc_is_busy() in 4944 * dev_deactivate(), so we can safely bail out 4945 * early here to avoid data race between 4946 * qdisc_deactivate() and some_qdisc_is_busy() 4947 * for lockless qdisc. 
4948 */ 4949 clear_bit(__QDISC_STATE_SCHED, &q->state); 4950 continue; 4951 } 4952 4953 clear_bit(__QDISC_STATE_SCHED, &q->state); 4954 qdisc_run(q); 4955 if (root_lock) 4956 spin_unlock(root_lock); 4957 } 4958 4959 rcu_read_unlock(); 4960 } 4961 4962 xfrm_dev_backlog(sd); 4963 } 4964 4965 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 4966 /* This hook is defined here for ATM LANE */ 4967 int (*br_fdb_test_addr_hook)(struct net_device *dev, 4968 unsigned char *addr) __read_mostly; 4969 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 4970 #endif 4971 4972 static inline struct sk_buff * 4973 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4974 struct net_device *orig_dev, bool *another) 4975 { 4976 #ifdef CONFIG_NET_CLS_ACT 4977 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 4978 struct tcf_result cl_res; 4979 4980 /* If there's at least one ingress present somewhere (so 4981 * we get here via enabled static key), remaining devices 4982 * that are not configured with an ingress qdisc will bail 4983 * out here. 4984 */ 4985 if (!miniq) 4986 return skb; 4987 4988 if (*pt_prev) { 4989 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4990 *pt_prev = NULL; 4991 } 4992 4993 qdisc_skb_cb(skb)->pkt_len = skb->len; 4994 tc_skb_cb(skb)->mru = 0; 4995 tc_skb_cb(skb)->post_ct = false; 4996 skb->tc_at_ingress = 1; 4997 mini_qdisc_bstats_cpu_update(miniq, skb); 4998 4999 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 5000 case TC_ACT_OK: 5001 case TC_ACT_RECLASSIFY: 5002 skb->tc_index = TC_H_MIN(cl_res.classid); 5003 break; 5004 case TC_ACT_SHOT: 5005 mini_qdisc_qstats_cpu_drop(miniq); 5006 kfree_skb(skb); 5007 return NULL; 5008 case TC_ACT_STOLEN: 5009 case TC_ACT_QUEUED: 5010 case TC_ACT_TRAP: 5011 consume_skb(skb); 5012 return NULL; 5013 case TC_ACT_REDIRECT: 5014 /* skb_mac_header check was done by cls/act_bpf, so 5015 * we can safely push the L2 header back before 5016 * redirecting to another netdev 5017 */ 5018 __skb_push(skb, skb->mac_len); 5019 if (skb_do_redirect(skb) == -EAGAIN) { 5020 __skb_pull(skb, skb->mac_len); 5021 *another = true; 5022 break; 5023 } 5024 return NULL; 5025 case TC_ACT_CONSUMED: 5026 return NULL; 5027 default: 5028 break; 5029 } 5030 #endif /* CONFIG_NET_CLS_ACT */ 5031 return skb; 5032 } 5033 5034 /** 5035 * netdev_is_rx_handler_busy - check if receive handler is registered 5036 * @dev: device to check 5037 * 5038 * Check if a receive handler is already registered for a given device. 5039 * Return true if there one. 5040 * 5041 * The caller must hold the rtnl_mutex. 5042 */ 5043 bool netdev_is_rx_handler_busy(struct net_device *dev) 5044 { 5045 ASSERT_RTNL(); 5046 return dev && rtnl_dereference(dev->rx_handler); 5047 } 5048 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5049 5050 /** 5051 * netdev_rx_handler_register - register receive handler 5052 * @dev: device to register a handler for 5053 * @rx_handler: receive handler to register 5054 * @rx_handler_data: data pointer that is used by rx handler 5055 * 5056 * Register a receive handler for a device. This handler will then be 5057 * called from __netif_receive_skb. A negative errno code is returned 5058 * on a failure. 5059 * 5060 * The caller must hold the rtnl_mutex. 5061 * 5062 * For a general description of rx_handler, see enum rx_handler_result. 
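 *
 * Illustrative sketch (the foo_* port abstraction is hypothetical; the
 * registration itself must run from a context that holds the rtnl lock):
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		if (!foo_port_wants(port, *pskb))
 *			return RX_HANDLER_PASS;
 *		foo_port_rx(port, *pskb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);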
5063 */ 5064 int netdev_rx_handler_register(struct net_device *dev, 5065 rx_handler_func_t *rx_handler, 5066 void *rx_handler_data) 5067 { 5068 if (netdev_is_rx_handler_busy(dev)) 5069 return -EBUSY; 5070 5071 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5072 return -EINVAL; 5073 5074 /* Note: rx_handler_data must be set before rx_handler */ 5075 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5076 rcu_assign_pointer(dev->rx_handler, rx_handler); 5077 5078 return 0; 5079 } 5080 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5081 5082 /** 5083 * netdev_rx_handler_unregister - unregister receive handler 5084 * @dev: device to unregister a handler from 5085 * 5086 * Unregister a receive handler from a device. 5087 * 5088 * The caller must hold the rtnl_mutex. 5089 */ 5090 void netdev_rx_handler_unregister(struct net_device *dev) 5091 { 5092 5093 ASSERT_RTNL(); 5094 RCU_INIT_POINTER(dev->rx_handler, NULL); 5095 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5096 * section has a guarantee to see a non NULL rx_handler_data 5097 * as well. 5098 */ 5099 synchronize_net(); 5100 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5101 } 5102 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5103 5104 /* 5105 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5106 * the special handling of PFMEMALLOC skbs. 5107 */ 5108 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5109 { 5110 switch (skb->protocol) { 5111 case htons(ETH_P_ARP): 5112 case htons(ETH_P_IP): 5113 case htons(ETH_P_IPV6): 5114 case htons(ETH_P_8021Q): 5115 case htons(ETH_P_8021AD): 5116 return true; 5117 default: 5118 return false; 5119 } 5120 } 5121 5122 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5123 int *ret, struct net_device *orig_dev) 5124 { 5125 if (nf_hook_ingress_active(skb)) { 5126 int ingress_retval; 5127 5128 if (*pt_prev) { 5129 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5130 *pt_prev = NULL; 5131 } 5132 5133 rcu_read_lock(); 5134 ingress_retval = nf_hook_ingress(skb); 5135 rcu_read_unlock(); 5136 return ingress_retval; 5137 } 5138 return 0; 5139 } 5140 5141 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5142 struct packet_type **ppt_prev) 5143 { 5144 struct packet_type *ptype, *pt_prev; 5145 rx_handler_func_t *rx_handler; 5146 struct sk_buff *skb = *pskb; 5147 struct net_device *orig_dev; 5148 bool deliver_exact = false; 5149 int ret = NET_RX_DROP; 5150 __be16 type; 5151 5152 net_timestamp_check(!netdev_tstamp_prequeue, skb); 5153 5154 trace_netif_receive_skb(skb); 5155 5156 orig_dev = skb->dev; 5157 5158 skb_reset_network_header(skb); 5159 if (!skb_transport_header_was_set(skb)) 5160 skb_reset_transport_header(skb); 5161 skb_reset_mac_len(skb); 5162 5163 pt_prev = NULL; 5164 5165 another_round: 5166 skb->skb_iif = skb->dev->ifindex; 5167 5168 __this_cpu_inc(softnet_data.processed); 5169 5170 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5171 int ret2; 5172 5173 migrate_disable(); 5174 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5175 migrate_enable(); 5176 5177 if (ret2 != XDP_PASS) { 5178 ret = NET_RX_DROP; 5179 goto out; 5180 } 5181 } 5182 5183 if (eth_type_vlan(skb->protocol)) { 5184 skb = skb_vlan_untag(skb); 5185 if (unlikely(!skb)) 5186 goto out; 5187 } 5188 5189 if (skb_skip_tc_classify(skb)) 5190 goto skip_classify; 5191 5192 if (pfmemalloc) 5193 goto skip_taps; 5194 5195 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5196 if (pt_prev) 5197 ret = deliver_skb(skb, 
pt_prev, orig_dev); 5198 pt_prev = ptype; 5199 } 5200 5201 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5202 if (pt_prev) 5203 ret = deliver_skb(skb, pt_prev, orig_dev); 5204 pt_prev = ptype; 5205 } 5206 5207 skip_taps: 5208 #ifdef CONFIG_NET_INGRESS 5209 if (static_branch_unlikely(&ingress_needed_key)) { 5210 bool another = false; 5211 5212 nf_skip_egress(skb, true); 5213 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5214 &another); 5215 if (another) 5216 goto another_round; 5217 if (!skb) 5218 goto out; 5219 5220 nf_skip_egress(skb, false); 5221 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5222 goto out; 5223 } 5224 #endif 5225 skb_reset_redirect(skb); 5226 skip_classify: 5227 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5228 goto drop; 5229 5230 if (skb_vlan_tag_present(skb)) { 5231 if (pt_prev) { 5232 ret = deliver_skb(skb, pt_prev, orig_dev); 5233 pt_prev = NULL; 5234 } 5235 if (vlan_do_receive(&skb)) 5236 goto another_round; 5237 else if (unlikely(!skb)) 5238 goto out; 5239 } 5240 5241 rx_handler = rcu_dereference(skb->dev->rx_handler); 5242 if (rx_handler) { 5243 if (pt_prev) { 5244 ret = deliver_skb(skb, pt_prev, orig_dev); 5245 pt_prev = NULL; 5246 } 5247 switch (rx_handler(&skb)) { 5248 case RX_HANDLER_CONSUMED: 5249 ret = NET_RX_SUCCESS; 5250 goto out; 5251 case RX_HANDLER_ANOTHER: 5252 goto another_round; 5253 case RX_HANDLER_EXACT: 5254 deliver_exact = true; 5255 break; 5256 case RX_HANDLER_PASS: 5257 break; 5258 default: 5259 BUG(); 5260 } 5261 } 5262 5263 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5264 check_vlan_id: 5265 if (skb_vlan_tag_get_id(skb)) { 5266 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5267 * find vlan device. 5268 */ 5269 skb->pkt_type = PACKET_OTHERHOST; 5270 } else if (eth_type_vlan(skb->protocol)) { 5271 /* Outer header is 802.1P with vlan 0, inner header is 5272 * 802.1Q or 802.1AD and vlan_do_receive() above could 5273 * not find vlan dev for vlan id 0. 5274 */ 5275 __vlan_hwaccel_clear_tag(skb); 5276 skb = skb_vlan_untag(skb); 5277 if (unlikely(!skb)) 5278 goto out; 5279 if (vlan_do_receive(&skb)) 5280 /* After stripping off 802.1P header with vlan 0 5281 * vlan dev is found for inner header. 5282 */ 5283 goto another_round; 5284 else if (unlikely(!skb)) 5285 goto out; 5286 else 5287 /* We have stripped outer 802.1P vlan 0 header. 5288 * But could not find vlan dev. 5289 * check again for vlan id to set OTHERHOST. 
5290 */ 5291 goto check_vlan_id; 5292 } 5293 /* Note: we might in the future use prio bits 5294 * and set skb->priority like in vlan_do_receive() 5295 * For the time being, just ignore Priority Code Point 5296 */ 5297 __vlan_hwaccel_clear_tag(skb); 5298 } 5299 5300 type = skb->protocol; 5301 5302 /* deliver only exact match when indicated */ 5303 if (likely(!deliver_exact)) { 5304 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5305 &ptype_base[ntohs(type) & 5306 PTYPE_HASH_MASK]); 5307 } 5308 5309 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5310 &orig_dev->ptype_specific); 5311 5312 if (unlikely(skb->dev != orig_dev)) { 5313 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5314 &skb->dev->ptype_specific); 5315 } 5316 5317 if (pt_prev) { 5318 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5319 goto drop; 5320 *ppt_prev = pt_prev; 5321 } else { 5322 drop: 5323 if (!deliver_exact) 5324 atomic_long_inc(&skb->dev->rx_dropped); 5325 else 5326 atomic_long_inc(&skb->dev->rx_nohandler); 5327 kfree_skb(skb); 5328 /* Jamal, now you will not able to escape explaining 5329 * me how you were going to use this. :-) 5330 */ 5331 ret = NET_RX_DROP; 5332 } 5333 5334 out: 5335 /* The invariant here is that if *ppt_prev is not NULL 5336 * then skb should also be non-NULL. 5337 * 5338 * Apparently *ppt_prev assignment above holds this invariant due to 5339 * skb dereferencing near it. 5340 */ 5341 *pskb = skb; 5342 return ret; 5343 } 5344 5345 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5346 { 5347 struct net_device *orig_dev = skb->dev; 5348 struct packet_type *pt_prev = NULL; 5349 int ret; 5350 5351 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5352 if (pt_prev) 5353 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5354 skb->dev, pt_prev, orig_dev); 5355 return ret; 5356 } 5357 5358 /** 5359 * netif_receive_skb_core - special purpose version of netif_receive_skb 5360 * @skb: buffer to process 5361 * 5362 * More direct receive version of netif_receive_skb(). It should 5363 * only be used by callers that have a need to skip RPS and Generic XDP. 5364 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5365 * 5366 * This function may only be called from softirq context and interrupts 5367 * should be enabled. 5368 * 5369 * Return values (usually ignored): 5370 * NET_RX_SUCCESS: no congestion 5371 * NET_RX_DROP: packet was dropped 5372 */ 5373 int netif_receive_skb_core(struct sk_buff *skb) 5374 { 5375 int ret; 5376 5377 rcu_read_lock(); 5378 ret = __netif_receive_skb_one_core(skb, false); 5379 rcu_read_unlock(); 5380 5381 return ret; 5382 } 5383 EXPORT_SYMBOL(netif_receive_skb_core); 5384 5385 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5386 struct packet_type *pt_prev, 5387 struct net_device *orig_dev) 5388 { 5389 struct sk_buff *skb, *next; 5390 5391 if (!pt_prev) 5392 return; 5393 if (list_empty(head)) 5394 return; 5395 if (pt_prev->list_func != NULL) 5396 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5397 ip_list_rcv, head, pt_prev, orig_dev); 5398 else 5399 list_for_each_entry_safe(skb, next, head, list) { 5400 skb_list_del_init(skb); 5401 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5402 } 5403 } 5404 5405 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5406 { 5407 /* Fast-path assumptions: 5408 * - There is no RX handler. 5409 * - Only one packet_type matches. 
5410 * If either of these fails, we will end up doing some per-packet 5411 * processing in-line, then handling the 'last ptype' for the whole 5412 * sublist. This can't cause out-of-order delivery to any single ptype, 5413 * because the 'last ptype' must be constant across the sublist, and all 5414 * other ptypes are handled per-packet. 5415 */ 5416 /* Current (common) ptype of sublist */ 5417 struct packet_type *pt_curr = NULL; 5418 /* Current (common) orig_dev of sublist */ 5419 struct net_device *od_curr = NULL; 5420 struct list_head sublist; 5421 struct sk_buff *skb, *next; 5422 5423 INIT_LIST_HEAD(&sublist); 5424 list_for_each_entry_safe(skb, next, head, list) { 5425 struct net_device *orig_dev = skb->dev; 5426 struct packet_type *pt_prev = NULL; 5427 5428 skb_list_del_init(skb); 5429 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5430 if (!pt_prev) 5431 continue; 5432 if (pt_curr != pt_prev || od_curr != orig_dev) { 5433 /* dispatch old sublist */ 5434 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5435 /* start new sublist */ 5436 INIT_LIST_HEAD(&sublist); 5437 pt_curr = pt_prev; 5438 od_curr = orig_dev; 5439 } 5440 list_add_tail(&skb->list, &sublist); 5441 } 5442 5443 /* dispatch final sublist */ 5444 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5445 } 5446 5447 static int __netif_receive_skb(struct sk_buff *skb) 5448 { 5449 int ret; 5450 5451 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5452 unsigned int noreclaim_flag; 5453 5454 /* 5455 * PFMEMALLOC skbs are special, they should 5456 * - be delivered to SOCK_MEMALLOC sockets only 5457 * - stay away from userspace 5458 * - have bounded memory usage 5459 * 5460 * Use PF_MEMALLOC as this saves us from propagating the allocation 5461 * context down to all allocation sites. 5462 */ 5463 noreclaim_flag = memalloc_noreclaim_save(); 5464 ret = __netif_receive_skb_one_core(skb, true); 5465 memalloc_noreclaim_restore(noreclaim_flag); 5466 } else 5467 ret = __netif_receive_skb_one_core(skb, false); 5468 5469 return ret; 5470 } 5471 5472 static void __netif_receive_skb_list(struct list_head *head) 5473 { 5474 unsigned long noreclaim_flag = 0; 5475 struct sk_buff *skb, *next; 5476 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5477 5478 list_for_each_entry_safe(skb, next, head, list) { 5479 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5480 struct list_head sublist; 5481 5482 /* Handle the previous sublist */ 5483 list_cut_before(&sublist, head, &skb->list); 5484 if (!list_empty(&sublist)) 5485 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5486 pfmemalloc = !pfmemalloc; 5487 /* See comments in __netif_receive_skb */ 5488 if (pfmemalloc) 5489 noreclaim_flag = memalloc_noreclaim_save(); 5490 else 5491 memalloc_noreclaim_restore(noreclaim_flag); 5492 } 5493 } 5494 /* Handle the remaining sublist */ 5495 if (!list_empty(head)) 5496 __netif_receive_skb_list_core(head, pfmemalloc); 5497 /* Restore pflags */ 5498 if (pfmemalloc) 5499 memalloc_noreclaim_restore(noreclaim_flag); 5500 } 5501 5502 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5503 { 5504 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5505 struct bpf_prog *new = xdp->prog; 5506 int ret = 0; 5507 5508 switch (xdp->command) { 5509 case XDP_SETUP_PROG: 5510 rcu_assign_pointer(dev->xdp_prog, new); 5511 if (old) 5512 bpf_prog_put(old); 5513 5514 if (old && !new) { 5515 static_branch_dec(&generic_xdp_needed_key); 5516 } else if (new && !old) { 5517 static_branch_inc(&generic_xdp_needed_key); 5518 dev_disable_lro(dev); 5519 dev_disable_gro_hw(dev); 5520 } 5521 break; 5522 5523 default: 5524 ret = -EINVAL; 5525 break; 5526 } 5527 5528 return ret; 5529 } 5530 5531 static int netif_receive_skb_internal(struct sk_buff *skb) 5532 { 5533 int ret; 5534 5535 net_timestamp_check(netdev_tstamp_prequeue, skb); 5536 5537 if (skb_defer_rx_timestamp(skb)) 5538 return NET_RX_SUCCESS; 5539 5540 rcu_read_lock(); 5541 #ifdef CONFIG_RPS 5542 if (static_branch_unlikely(&rps_needed)) { 5543 struct rps_dev_flow voidflow, *rflow = &voidflow; 5544 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5545 5546 if (cpu >= 0) { 5547 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5548 rcu_read_unlock(); 5549 return ret; 5550 } 5551 } 5552 #endif 5553 ret = __netif_receive_skb(skb); 5554 rcu_read_unlock(); 5555 return ret; 5556 } 5557 5558 void netif_receive_skb_list_internal(struct list_head *head) 5559 { 5560 struct sk_buff *skb, *next; 5561 struct list_head sublist; 5562 5563 INIT_LIST_HEAD(&sublist); 5564 list_for_each_entry_safe(skb, next, head, list) { 5565 net_timestamp_check(netdev_tstamp_prequeue, skb); 5566 skb_list_del_init(skb); 5567 if (!skb_defer_rx_timestamp(skb)) 5568 list_add_tail(&skb->list, &sublist); 5569 } 5570 list_splice_init(&sublist, head); 5571 5572 rcu_read_lock(); 5573 #ifdef CONFIG_RPS 5574 if (static_branch_unlikely(&rps_needed)) { 5575 list_for_each_entry_safe(skb, next, head, list) { 5576 struct rps_dev_flow voidflow, *rflow = &voidflow; 5577 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5578 5579 if (cpu >= 0) { 5580 /* Will be handled, remove from list */ 5581 skb_list_del_init(skb); 5582 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5583 } 5584 } 5585 } 5586 #endif 5587 __netif_receive_skb_list(head); 5588 rcu_read_unlock(); 5589 } 5590 5591 /** 5592 * netif_receive_skb - process receive buffer from network 5593 * @skb: buffer to process 5594 * 5595 * netif_receive_skb() is the main receive data processing function. 5596 * It always succeeds. The buffer may be dropped during processing 5597 * for congestion control or by the protocol layers. 5598 * 5599 * This function may only be called from softirq context and interrupts 5600 * should be enabled. 
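 *
 * A typical caller is a driver's NAPI poll routine delivering completed
 * frames (illustrative sketch; foo_* names are hypothetical):
 *
 *	while (work < budget &&
 *	       (skb = foo_next_completed_rx(priv)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, priv->netdev);
 *		netif_receive_skb(skb);
 *		work++;
 *	}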
5601 * 5602 * Return values (usually ignored): 5603 * NET_RX_SUCCESS: no congestion 5604 * NET_RX_DROP: packet was dropped 5605 */ 5606 int netif_receive_skb(struct sk_buff *skb) 5607 { 5608 int ret; 5609 5610 trace_netif_receive_skb_entry(skb); 5611 5612 ret = netif_receive_skb_internal(skb); 5613 trace_netif_receive_skb_exit(ret); 5614 5615 return ret; 5616 } 5617 EXPORT_SYMBOL(netif_receive_skb); 5618 5619 /** 5620 * netif_receive_skb_list - process many receive buffers from network 5621 * @head: list of skbs to process. 5622 * 5623 * Since return value of netif_receive_skb() is normally ignored, and 5624 * wouldn't be meaningful for a list, this function returns void. 5625 * 5626 * This function may only be called from softirq context and interrupts 5627 * should be enabled. 5628 */ 5629 void netif_receive_skb_list(struct list_head *head) 5630 { 5631 struct sk_buff *skb; 5632 5633 if (list_empty(head)) 5634 return; 5635 if (trace_netif_receive_skb_list_entry_enabled()) { 5636 list_for_each_entry(skb, head, list) 5637 trace_netif_receive_skb_list_entry(skb); 5638 } 5639 netif_receive_skb_list_internal(head); 5640 trace_netif_receive_skb_list_exit(0); 5641 } 5642 EXPORT_SYMBOL(netif_receive_skb_list); 5643 5644 static DEFINE_PER_CPU(struct work_struct, flush_works); 5645 5646 /* Network device is going away, flush any packets still pending */ 5647 static void flush_backlog(struct work_struct *work) 5648 { 5649 struct sk_buff *skb, *tmp; 5650 struct softnet_data *sd; 5651 5652 local_bh_disable(); 5653 sd = this_cpu_ptr(&softnet_data); 5654 5655 rps_lock_irq_disable(sd); 5656 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5657 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5658 __skb_unlink(skb, &sd->input_pkt_queue); 5659 dev_kfree_skb_irq(skb); 5660 input_queue_head_incr(sd); 5661 } 5662 } 5663 rps_unlock_irq_enable(sd); 5664 5665 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5666 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5667 __skb_unlink(skb, &sd->process_queue); 5668 kfree_skb(skb); 5669 input_queue_head_incr(sd); 5670 } 5671 } 5672 local_bh_enable(); 5673 } 5674 5675 static bool flush_required(int cpu) 5676 { 5677 #if IS_ENABLED(CONFIG_RPS) 5678 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5679 bool do_flush; 5680 5681 rps_lock_irq_disable(sd); 5682 5683 /* as insertion into process_queue happens with the rps lock held, 5684 * process_queue access may race only with dequeue 5685 */ 5686 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5687 !skb_queue_empty_lockless(&sd->process_queue); 5688 rps_unlock_irq_enable(sd); 5689 5690 return do_flush; 5691 #endif 5692 /* without RPS we can't safely check input_pkt_queue: during a 5693 * concurrent remote skb_queue_splice() we can detect as empty both 5694 * input_pkt_queue and process_queue even if the latter could end-up 5695 * containing a lot of packets. 
5696 */ 5697 return true; 5698 } 5699 5700 static void flush_all_backlogs(void) 5701 { 5702 static cpumask_t flush_cpus; 5703 unsigned int cpu; 5704 5705 /* since we are under rtnl lock protection we can use static data 5706 * for the cpumask and avoid allocating on stack the possibly 5707 * large mask 5708 */ 5709 ASSERT_RTNL(); 5710 5711 cpus_read_lock(); 5712 5713 cpumask_clear(&flush_cpus); 5714 for_each_online_cpu(cpu) { 5715 if (flush_required(cpu)) { 5716 queue_work_on(cpu, system_highpri_wq, 5717 per_cpu_ptr(&flush_works, cpu)); 5718 cpumask_set_cpu(cpu, &flush_cpus); 5719 } 5720 } 5721 5722 /* we can have in flight packet[s] on the cpus we are not flushing, 5723 * synchronize_net() in unregister_netdevice_many() will take care of 5724 * them 5725 */ 5726 for_each_cpu(cpu, &flush_cpus) 5727 flush_work(per_cpu_ptr(&flush_works, cpu)); 5728 5729 cpus_read_unlock(); 5730 } 5731 5732 static void net_rps_send_ipi(struct softnet_data *remsd) 5733 { 5734 #ifdef CONFIG_RPS 5735 while (remsd) { 5736 struct softnet_data *next = remsd->rps_ipi_next; 5737 5738 if (cpu_online(remsd->cpu)) 5739 smp_call_function_single_async(remsd->cpu, &remsd->csd); 5740 remsd = next; 5741 } 5742 #endif 5743 } 5744 5745 /* 5746 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 5747 * Note: called with local irq disabled, but exits with local irq enabled. 5748 */ 5749 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 5750 { 5751 #ifdef CONFIG_RPS 5752 struct softnet_data *remsd = sd->rps_ipi_list; 5753 5754 if (remsd) { 5755 sd->rps_ipi_list = NULL; 5756 5757 local_irq_enable(); 5758 5759 /* Send pending IPI's to kick RPS processing on remote cpus. */ 5760 net_rps_send_ipi(remsd); 5761 } else 5762 #endif 5763 local_irq_enable(); 5764 } 5765 5766 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 5767 { 5768 #ifdef CONFIG_RPS 5769 return sd->rps_ipi_list != NULL; 5770 #else 5771 return false; 5772 #endif 5773 } 5774 5775 static int process_backlog(struct napi_struct *napi, int quota) 5776 { 5777 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 5778 bool again = true; 5779 int work = 0; 5780 5781 /* Check if we have pending ipi, its better to send them now, 5782 * not waiting net_rx_action() end. 5783 */ 5784 if (sd_has_rps_ipi_waiting(sd)) { 5785 local_irq_disable(); 5786 net_rps_action_and_irq_enable(sd); 5787 } 5788 5789 napi->weight = dev_rx_weight; 5790 while (again) { 5791 struct sk_buff *skb; 5792 5793 while ((skb = __skb_dequeue(&sd->process_queue))) { 5794 rcu_read_lock(); 5795 __netif_receive_skb(skb); 5796 rcu_read_unlock(); 5797 input_queue_head_incr(sd); 5798 if (++work >= quota) 5799 return work; 5800 5801 } 5802 5803 rps_lock_irq_disable(sd); 5804 if (skb_queue_empty(&sd->input_pkt_queue)) { 5805 /* 5806 * Inline a custom version of __napi_complete(). 5807 * only current cpu owns and manipulates this napi, 5808 * and NAPI_STATE_SCHED is the only possible flag set 5809 * on backlog. 5810 * We can use a plain write instead of clear_bit(), 5811 * and we dont need an smp_mb() memory barrier. 5812 */ 5813 napi->state = 0; 5814 again = false; 5815 } else { 5816 skb_queue_splice_tail_init(&sd->input_pkt_queue, 5817 &sd->process_queue); 5818 } 5819 rps_unlock_irq_enable(sd); 5820 } 5821 5822 return work; 5823 } 5824 5825 /** 5826 * __napi_schedule - schedule for receive 5827 * @n: entry to schedule 5828 * 5829 * The entry's receive function will be scheduled to run. 5830 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 
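 *
 * From a device interrupt handler the napi_schedule() wrapper, which
 * combines napi_schedule_prep() with this function, is the usual entry
 * point (illustrative sketch; foo_* names are hypothetical):
 *
 *	static irqreturn_t foo_msix_handler(int irq, void *data)
 *	{
 *		struct foo_rx_queue *q = data;
 *
 *		foo_mask_queue_irq(q);
 *		napi_schedule(&q->napi);
 *		return IRQ_HANDLED;
 *	}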
5831 */ 5832 void __napi_schedule(struct napi_struct *n) 5833 { 5834 unsigned long flags; 5835 5836 local_irq_save(flags); 5837 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5838 local_irq_restore(flags); 5839 } 5840 EXPORT_SYMBOL(__napi_schedule); 5841 5842 /** 5843 * napi_schedule_prep - check if napi can be scheduled 5844 * @n: napi context 5845 * 5846 * Test if NAPI routine is already running, and if not mark 5847 * it as running. This is used as a condition variable to 5848 * ensure only one NAPI poll instance runs. We also make 5849 * sure there is no pending NAPI disable. 5850 */ 5851 bool napi_schedule_prep(struct napi_struct *n) 5852 { 5853 unsigned long val, new; 5854 5855 do { 5856 val = READ_ONCE(n->state); 5857 if (unlikely(val & NAPIF_STATE_DISABLE)) 5858 return false; 5859 new = val | NAPIF_STATE_SCHED; 5860 5861 /* Sets STATE_MISSED bit if STATE_SCHED was already set 5862 * This was suggested by Alexander Duyck, as compiler 5863 * emits better code than: 5864 * if (val & NAPIF_STATE_SCHED) 5865 * new |= NAPIF_STATE_MISSED; 5866 */ 5867 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 5868 NAPIF_STATE_MISSED; 5869 } while (cmpxchg(&n->state, val, new) != val); 5870 5871 return !(val & NAPIF_STATE_SCHED); 5872 } 5873 EXPORT_SYMBOL(napi_schedule_prep); 5874 5875 /** 5876 * __napi_schedule_irqoff - schedule for receive 5877 * @n: entry to schedule 5878 * 5879 * Variant of __napi_schedule() assuming hard irqs are masked. 5880 * 5881 * On PREEMPT_RT enabled kernels this maps to __napi_schedule() 5882 * because the interrupt disabled assumption might not be true 5883 * due to force-threaded interrupts and spinlock substitution. 5884 */ 5885 void __napi_schedule_irqoff(struct napi_struct *n) 5886 { 5887 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 5888 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 5889 else 5890 __napi_schedule(n); 5891 } 5892 EXPORT_SYMBOL(__napi_schedule_irqoff); 5893 5894 bool napi_complete_done(struct napi_struct *n, int work_done) 5895 { 5896 unsigned long flags, val, new, timeout = 0; 5897 bool ret = true; 5898 5899 /* 5900 * 1) Don't let napi dequeue from the cpu poll list 5901 * just in case it's running on a different cpu. 5902 * 2) If we are busy polling, do nothing here, we have 5903 * the guarantee we will be called later.
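 *
 * (For reference, the usual caller pattern, an illustrative sketch rather
 * than code from this file, only re-arms device interrupts when this
 * function returns true:
 *
 *	if (work_done < budget &&
 *	    napi_complete_done(napi, work_done))
 *		foo_enable_irq(priv);
 *
 * so returning false keeps the instance in polling mode.)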
5904 */ 5905 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 5906 NAPIF_STATE_IN_BUSY_POLL))) 5907 return false; 5908 5909 if (work_done) { 5910 if (n->gro_bitmask) 5911 timeout = READ_ONCE(n->dev->gro_flush_timeout); 5912 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 5913 } 5914 if (n->defer_hard_irqs_count > 0) { 5915 n->defer_hard_irqs_count--; 5916 timeout = READ_ONCE(n->dev->gro_flush_timeout); 5917 if (timeout) 5918 ret = false; 5919 } 5920 if (n->gro_bitmask) { 5921 /* When the NAPI instance uses a timeout and keeps postponing 5922 * it, we need to bound somehow the time packets are kept in 5923 * the GRO layer 5924 */ 5925 napi_gro_flush(n, !!timeout); 5926 } 5927 5928 gro_normal_list(n); 5929 5930 if (unlikely(!list_empty(&n->poll_list))) { 5931 /* If n->poll_list is not empty, we need to mask irqs */ 5932 local_irq_save(flags); 5933 list_del_init(&n->poll_list); 5934 local_irq_restore(flags); 5935 } 5936 5937 do { 5938 val = READ_ONCE(n->state); 5939 5940 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 5941 5942 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 5943 NAPIF_STATE_SCHED_THREADED | 5944 NAPIF_STATE_PREFER_BUSY_POLL); 5945 5946 /* If STATE_MISSED was set, leave STATE_SCHED set, 5947 * because we will call napi->poll() one more time. 5948 * This C code was suggested by Alexander Duyck to help gcc. 5949 */ 5950 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 5951 NAPIF_STATE_SCHED; 5952 } while (cmpxchg(&n->state, val, new) != val); 5953 5954 if (unlikely(val & NAPIF_STATE_MISSED)) { 5955 __napi_schedule(n); 5956 return false; 5957 } 5958 5959 if (timeout) 5960 hrtimer_start(&n->timer, ns_to_ktime(timeout), 5961 HRTIMER_MODE_REL_PINNED); 5962 return ret; 5963 } 5964 EXPORT_SYMBOL(napi_complete_done); 5965 5966 /* must be called under rcu_read_lock(), as we dont take a reference */ 5967 static struct napi_struct *napi_by_id(unsigned int napi_id) 5968 { 5969 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 5970 struct napi_struct *napi; 5971 5972 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 5973 if (napi->napi_id == napi_id) 5974 return napi; 5975 5976 return NULL; 5977 } 5978 5979 #if defined(CONFIG_NET_RX_BUSY_POLL) 5980 5981 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 5982 { 5983 if (!skip_schedule) { 5984 gro_normal_list(napi); 5985 __napi_schedule(napi); 5986 return; 5987 } 5988 5989 if (napi->gro_bitmask) { 5990 /* flush too old packets 5991 * If HZ < 1000, flush all packets. 5992 */ 5993 napi_gro_flush(napi, HZ >= 1000); 5994 } 5995 5996 gro_normal_list(napi); 5997 clear_bit(NAPI_STATE_SCHED, &napi->state); 5998 } 5999 6000 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6001 u16 budget) 6002 { 6003 bool skip_schedule = false; 6004 unsigned long timeout; 6005 int rc; 6006 6007 /* Busy polling means there is a high chance device driver hard irq 6008 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6009 * set in napi_schedule_prep(). 6010 * Since we are about to call napi->poll() once more, we can safely 6011 * clear NAPI_STATE_MISSED. 6012 * 6013 * Note: x86 could use a single "lock and ..." 
instruction 6014 * to perform these two clear_bit() 6015 */ 6016 clear_bit(NAPI_STATE_MISSED, &napi->state); 6017 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6018 6019 local_bh_disable(); 6020 6021 if (prefer_busy_poll) { 6022 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6023 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6024 if (napi->defer_hard_irqs_count && timeout) { 6025 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6026 skip_schedule = true; 6027 } 6028 } 6029 6030 /* All we really want here is to re-enable device interrupts. 6031 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6032 */ 6033 rc = napi->poll(napi, budget); 6034 /* We can't gro_normal_list() here, because napi->poll() might have 6035 * rearmed the napi (napi_complete_done()) in which case it could 6036 * already be running on another CPU. 6037 */ 6038 trace_napi_poll(napi, rc, budget); 6039 netpoll_poll_unlock(have_poll_lock); 6040 if (rc == budget) 6041 __busy_poll_stop(napi, skip_schedule); 6042 local_bh_enable(); 6043 } 6044 6045 void napi_busy_loop(unsigned int napi_id, 6046 bool (*loop_end)(void *, unsigned long), 6047 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6048 { 6049 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6050 int (*napi_poll)(struct napi_struct *napi, int budget); 6051 void *have_poll_lock = NULL; 6052 struct napi_struct *napi; 6053 6054 restart: 6055 napi_poll = NULL; 6056 6057 rcu_read_lock(); 6058 6059 napi = napi_by_id(napi_id); 6060 if (!napi) 6061 goto out; 6062 6063 preempt_disable(); 6064 for (;;) { 6065 int work = 0; 6066 6067 local_bh_disable(); 6068 if (!napi_poll) { 6069 unsigned long val = READ_ONCE(napi->state); 6070 6071 /* If multiple threads are competing for this napi, 6072 * we avoid dirtying napi->state as much as we can. 
6073 */ 6074 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6075 NAPIF_STATE_IN_BUSY_POLL)) { 6076 if (prefer_busy_poll) 6077 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6078 goto count; 6079 } 6080 if (cmpxchg(&napi->state, val, 6081 val | NAPIF_STATE_IN_BUSY_POLL | 6082 NAPIF_STATE_SCHED) != val) { 6083 if (prefer_busy_poll) 6084 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6085 goto count; 6086 } 6087 have_poll_lock = netpoll_poll_lock(napi); 6088 napi_poll = napi->poll; 6089 } 6090 work = napi_poll(napi, budget); 6091 trace_napi_poll(napi, work, budget); 6092 gro_normal_list(napi); 6093 count: 6094 if (work > 0) 6095 __NET_ADD_STATS(dev_net(napi->dev), 6096 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6097 local_bh_enable(); 6098 6099 if (!loop_end || loop_end(loop_end_arg, start_time)) 6100 break; 6101 6102 if (unlikely(need_resched())) { 6103 if (napi_poll) 6104 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6105 preempt_enable(); 6106 rcu_read_unlock(); 6107 cond_resched(); 6108 if (loop_end(loop_end_arg, start_time)) 6109 return; 6110 goto restart; 6111 } 6112 cpu_relax(); 6113 } 6114 if (napi_poll) 6115 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6116 preempt_enable(); 6117 out: 6118 rcu_read_unlock(); 6119 } 6120 EXPORT_SYMBOL(napi_busy_loop); 6121 6122 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6123 6124 static void napi_hash_add(struct napi_struct *napi) 6125 { 6126 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6127 return; 6128 6129 spin_lock(&napi_hash_lock); 6130 6131 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6132 do { 6133 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6134 napi_gen_id = MIN_NAPI_ID; 6135 } while (napi_by_id(napi_gen_id)); 6136 napi->napi_id = napi_gen_id; 6137 6138 hlist_add_head_rcu(&napi->napi_hash_node, 6139 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6140 6141 spin_unlock(&napi_hash_lock); 6142 } 6143 6144 /* Warning : caller is responsible to make sure rcu grace period 6145 * is respected before freeing memory containing @napi 6146 */ 6147 static void napi_hash_del(struct napi_struct *napi) 6148 { 6149 spin_lock(&napi_hash_lock); 6150 6151 hlist_del_init_rcu(&napi->napi_hash_node); 6152 6153 spin_unlock(&napi_hash_lock); 6154 } 6155 6156 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6157 { 6158 struct napi_struct *napi; 6159 6160 napi = container_of(timer, struct napi_struct, timer); 6161 6162 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6163 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
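 * The timer therefore only rearms the NAPI when it is neither being
 * disabled nor already scheduled; if another context owns
 * NAPI_STATE_SCHED, that context will notice the deferred work itself.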
6164 */ 6165 if (!napi_disable_pending(napi) && 6166 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6167 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6168 __napi_schedule_irqoff(napi); 6169 } 6170 6171 return HRTIMER_NORESTART; 6172 } 6173 6174 static void init_gro_hash(struct napi_struct *napi) 6175 { 6176 int i; 6177 6178 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6179 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6180 napi->gro_hash[i].count = 0; 6181 } 6182 napi->gro_bitmask = 0; 6183 } 6184 6185 int dev_set_threaded(struct net_device *dev, bool threaded) 6186 { 6187 struct napi_struct *napi; 6188 int err = 0; 6189 6190 if (dev->threaded == threaded) 6191 return 0; 6192 6193 if (threaded) { 6194 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6195 if (!napi->thread) { 6196 err = napi_kthread_create(napi); 6197 if (err) { 6198 threaded = false; 6199 break; 6200 } 6201 } 6202 } 6203 } 6204 6205 dev->threaded = threaded; 6206 6207 /* Make sure kthread is created before THREADED bit 6208 * is set. 6209 */ 6210 smp_mb__before_atomic(); 6211 6212 /* Setting/unsetting threaded mode on a napi might not immediately 6213 * take effect, if the current napi instance is actively being 6214 * polled. In this case, the switch between threaded mode and 6215 * softirq mode will happen in the next round of napi_schedule(). 6216 * This should not cause hiccups/stalls to the live traffic. 6217 */ 6218 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6219 if (threaded) 6220 set_bit(NAPI_STATE_THREADED, &napi->state); 6221 else 6222 clear_bit(NAPI_STATE_THREADED, &napi->state); 6223 } 6224 6225 return err; 6226 } 6227 EXPORT_SYMBOL(dev_set_threaded); 6228 6229 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6230 int (*poll)(struct napi_struct *, int), int weight) 6231 { 6232 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6233 return; 6234 6235 INIT_LIST_HEAD(&napi->poll_list); 6236 INIT_HLIST_NODE(&napi->napi_hash_node); 6237 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6238 napi->timer.function = napi_watchdog; 6239 init_gro_hash(napi); 6240 napi->skb = NULL; 6241 INIT_LIST_HEAD(&napi->rx_list); 6242 napi->rx_count = 0; 6243 napi->poll = poll; 6244 if (weight > NAPI_POLL_WEIGHT) 6245 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6246 weight); 6247 napi->weight = weight; 6248 napi->dev = dev; 6249 #ifdef CONFIG_NETPOLL 6250 napi->poll_owner = -1; 6251 #endif 6252 set_bit(NAPI_STATE_SCHED, &napi->state); 6253 set_bit(NAPI_STATE_NPSVC, &napi->state); 6254 list_add_rcu(&napi->dev_list, &dev->napi_list); 6255 napi_hash_add(napi); 6256 /* Create kthread for this napi if dev->threaded is set. 6257 * Clear dev->threaded if kthread creation failed so that 6258 * threaded mode will not be enabled in napi_enable(). 
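 * Threaded mode can also be toggled after registration through
 * dev_set_threaded() above or, equivalently, the per-device "threaded"
 * sysfs attribute, which creates kthreads for NAPIs that lack one.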
6259 */ 6260 if (dev->threaded && napi_kthread_create(napi)) 6261 dev->threaded = 0; 6262 } 6263 EXPORT_SYMBOL(netif_napi_add); 6264 6265 void napi_disable(struct napi_struct *n) 6266 { 6267 unsigned long val, new; 6268 6269 might_sleep(); 6270 set_bit(NAPI_STATE_DISABLE, &n->state); 6271 6272 for ( ; ; ) { 6273 val = READ_ONCE(n->state); 6274 if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { 6275 usleep_range(20, 200); 6276 continue; 6277 } 6278 6279 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; 6280 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); 6281 6282 if (cmpxchg(&n->state, val, new) == val) 6283 break; 6284 } 6285 6286 hrtimer_cancel(&n->timer); 6287 6288 clear_bit(NAPI_STATE_DISABLE, &n->state); 6289 } 6290 EXPORT_SYMBOL(napi_disable); 6291 6292 /** 6293 * napi_enable - enable NAPI scheduling 6294 * @n: NAPI context 6295 * 6296 * Resume NAPI from being scheduled on this context. 6297 * Must be paired with napi_disable. 6298 */ 6299 void napi_enable(struct napi_struct *n) 6300 { 6301 unsigned long val, new; 6302 6303 do { 6304 val = READ_ONCE(n->state); 6305 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6306 6307 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6308 if (n->dev->threaded && n->thread) 6309 new |= NAPIF_STATE_THREADED; 6310 } while (cmpxchg(&n->state, val, new) != val); 6311 } 6312 EXPORT_SYMBOL(napi_enable); 6313 6314 static void flush_gro_hash(struct napi_struct *napi) 6315 { 6316 int i; 6317 6318 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6319 struct sk_buff *skb, *n; 6320 6321 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6322 kfree_skb(skb); 6323 napi->gro_hash[i].count = 0; 6324 } 6325 } 6326 6327 /* Must be called in process context */ 6328 void __netif_napi_del(struct napi_struct *napi) 6329 { 6330 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6331 return; 6332 6333 napi_hash_del(napi); 6334 list_del_rcu(&napi->dev_list); 6335 napi_free_frags(napi); 6336 6337 flush_gro_hash(napi); 6338 napi->gro_bitmask = 0; 6339 6340 if (napi->thread) { 6341 kthread_stop(napi->thread); 6342 napi->thread = NULL; 6343 } 6344 } 6345 EXPORT_SYMBOL(__netif_napi_del); 6346 6347 static int __napi_poll(struct napi_struct *n, bool *repoll) 6348 { 6349 int work, weight; 6350 6351 weight = n->weight; 6352 6353 /* This NAPI_STATE_SCHED test is for avoiding a race 6354 * with netpoll's poll_napi(). Only the entity which 6355 * obtains the lock and sees NAPI_STATE_SCHED set will 6356 * actually make the ->poll() call. Therefore we avoid 6357 * accidentally calling ->poll() when NAPI is not scheduled. 6358 */ 6359 work = 0; 6360 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6361 work = n->poll(n, weight); 6362 trace_napi_poll(n, work, weight); 6363 } 6364 6365 if (unlikely(work > weight)) 6366 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6367 n->poll, work, weight); 6368 6369 if (likely(work < weight)) 6370 return work; 6371 6372 /* Drivers must not modify the NAPI state if they 6373 * consume the entire weight. In such cases this code 6374 * still "owns" the NAPI instance and therefore can 6375 * move the instance around on the list at-will. 6376 */ 6377 if (unlikely(napi_disable_pending(n))) { 6378 napi_complete(n); 6379 return work; 6380 } 6381 6382 /* The NAPI context has more processing work, but busy-polling 6383 * is preferred. Exit early. 
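 * Handing the context back here lets the busy-polling thread (see
 * napi_busy_loop() above) pick up the remaining work instead of having
 * the softirq compete with it for the device.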
6384 */ 6385 if (napi_prefer_busy_poll(n)) { 6386 if (napi_complete_done(n, work)) { 6387 /* If timeout is not set, we need to make sure 6388 * that the NAPI is re-scheduled. 6389 */ 6390 napi_schedule(n); 6391 } 6392 return work; 6393 } 6394 6395 if (n->gro_bitmask) { 6396 /* flush too old packets 6397 * If HZ < 1000, flush all packets. 6398 */ 6399 napi_gro_flush(n, HZ >= 1000); 6400 } 6401 6402 gro_normal_list(n); 6403 6404 /* Some drivers may have called napi_schedule 6405 * prior to exhausting their budget. 6406 */ 6407 if (unlikely(!list_empty(&n->poll_list))) { 6408 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6409 n->dev ? n->dev->name : "backlog"); 6410 return work; 6411 } 6412 6413 *repoll = true; 6414 6415 return work; 6416 } 6417 6418 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6419 { 6420 bool do_repoll = false; 6421 void *have; 6422 int work; 6423 6424 list_del_init(&n->poll_list); 6425 6426 have = netpoll_poll_lock(n); 6427 6428 work = __napi_poll(n, &do_repoll); 6429 6430 if (do_repoll) 6431 list_add_tail(&n->poll_list, repoll); 6432 6433 netpoll_poll_unlock(have); 6434 6435 return work; 6436 } 6437 6438 static int napi_thread_wait(struct napi_struct *napi) 6439 { 6440 bool woken = false; 6441 6442 set_current_state(TASK_INTERRUPTIBLE); 6443 6444 while (!kthread_should_stop()) { 6445 /* Testing SCHED_THREADED bit here to make sure the current 6446 * kthread owns this napi and could poll on this napi. 6447 * Testing SCHED bit is not enough because SCHED bit might be 6448 * set by some other busy poll thread or by napi_disable(). 6449 */ 6450 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 6451 WARN_ON(!list_empty(&napi->poll_list)); 6452 __set_current_state(TASK_RUNNING); 6453 return 0; 6454 } 6455 6456 schedule(); 6457 /* woken being true indicates this thread owns this napi. */ 6458 woken = true; 6459 set_current_state(TASK_INTERRUPTIBLE); 6460 } 6461 __set_current_state(TASK_RUNNING); 6462 6463 return -1; 6464 } 6465 6466 static int napi_threaded_poll(void *data) 6467 { 6468 struct napi_struct *napi = data; 6469 void *have; 6470 6471 while (!napi_thread_wait(napi)) { 6472 for (;;) { 6473 bool repoll = false; 6474 6475 local_bh_disable(); 6476 6477 have = netpoll_poll_lock(napi); 6478 __napi_poll(napi, &repoll); 6479 netpoll_poll_unlock(have); 6480 6481 local_bh_enable(); 6482 6483 if (!repoll) 6484 break; 6485 6486 cond_resched(); 6487 } 6488 } 6489 return 0; 6490 } 6491 6492 static __latent_entropy void net_rx_action(struct softirq_action *h) 6493 { 6494 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6495 unsigned long time_limit = jiffies + 6496 usecs_to_jiffies(netdev_budget_usecs); 6497 int budget = netdev_budget; 6498 LIST_HEAD(list); 6499 LIST_HEAD(repoll); 6500 6501 local_irq_disable(); 6502 list_splice_init(&sd->poll_list, &list); 6503 local_irq_enable(); 6504 6505 for (;;) { 6506 struct napi_struct *n; 6507 6508 if (list_empty(&list)) { 6509 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 6510 return; 6511 break; 6512 } 6513 6514 n = list_first_entry(&list, struct napi_struct, poll_list); 6515 budget -= napi_poll(n, &repoll); 6516 6517 /* If softirq window is exhausted then punt. 6518 * Allow this to run for 2 jiffies since which will allow 6519 * an average latency of 1.5/HZ. 
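 * The quota is tracked both in packets (netdev_budget, 300 by default)
 * and in time (netdev_budget_usecs, two jiffies' worth by default);
 * sd->time_squeeze, incremented below, is exported through
 * /proc/net/softnet_stat so exhaustion of either limit can be observed.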
6520 */ 6521 if (unlikely(budget <= 0 || 6522 time_after_eq(jiffies, time_limit))) { 6523 sd->time_squeeze++; 6524 break; 6525 } 6526 } 6527 6528 local_irq_disable(); 6529 6530 list_splice_tail_init(&sd->poll_list, &list); 6531 list_splice_tail(&repoll, &list); 6532 list_splice(&list, &sd->poll_list); 6533 if (!list_empty(&sd->poll_list)) 6534 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6535 6536 net_rps_action_and_irq_enable(sd); 6537 } 6538 6539 struct netdev_adjacent { 6540 struct net_device *dev; 6541 netdevice_tracker dev_tracker; 6542 6543 /* upper master flag, there can only be one master device per list */ 6544 bool master; 6545 6546 /* lookup ignore flag */ 6547 bool ignore; 6548 6549 /* counter for the number of times this device was added to us */ 6550 u16 ref_nr; 6551 6552 /* private field for the users */ 6553 void *private; 6554 6555 struct list_head list; 6556 struct rcu_head rcu; 6557 }; 6558 6559 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6560 struct list_head *adj_list) 6561 { 6562 struct netdev_adjacent *adj; 6563 6564 list_for_each_entry(adj, adj_list, list) { 6565 if (adj->dev == adj_dev) 6566 return adj; 6567 } 6568 return NULL; 6569 } 6570 6571 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 6572 struct netdev_nested_priv *priv) 6573 { 6574 struct net_device *dev = (struct net_device *)priv->data; 6575 6576 return upper_dev == dev; 6577 } 6578 6579 /** 6580 * netdev_has_upper_dev - Check if device is linked to an upper device 6581 * @dev: device 6582 * @upper_dev: upper device to check 6583 * 6584 * Find out if a device is linked to specified upper device and return true 6585 * in case it is. Note that this checks only immediate upper device, 6586 * not through a complete stack of devices. The caller must hold the RTNL lock. 6587 */ 6588 bool netdev_has_upper_dev(struct net_device *dev, 6589 struct net_device *upper_dev) 6590 { 6591 struct netdev_nested_priv priv = { 6592 .data = (void *)upper_dev, 6593 }; 6594 6595 ASSERT_RTNL(); 6596 6597 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6598 &priv); 6599 } 6600 EXPORT_SYMBOL(netdev_has_upper_dev); 6601 6602 /** 6603 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 6604 * @dev: device 6605 * @upper_dev: upper device to check 6606 * 6607 * Find out if a device is linked to specified upper device and return true 6608 * in case it is. Note that this checks the entire upper device chain. 6609 * The caller must hold rcu lock. 6610 */ 6611 6612 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6613 struct net_device *upper_dev) 6614 { 6615 struct netdev_nested_priv priv = { 6616 .data = (void *)upper_dev, 6617 }; 6618 6619 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6620 &priv); 6621 } 6622 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6623 6624 /** 6625 * netdev_has_any_upper_dev - Check if device is linked to some device 6626 * @dev: device 6627 * 6628 * Find out if a device is linked to an upper device and return true in case 6629 * it is. The caller must hold the RTNL lock. 6630 */ 6631 bool netdev_has_any_upper_dev(struct net_device *dev) 6632 { 6633 ASSERT_RTNL(); 6634 6635 return !list_empty(&dev->adj_list.upper); 6636 } 6637 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6638 6639 /** 6640 * netdev_master_upper_dev_get - Get master upper device 6641 * @dev: device 6642 * 6643 * Find a master upper device and return pointer to it or NULL in case 6644 * it's not there. 
The caller must hold the RTNL lock. 6645 */ 6646 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6647 { 6648 struct netdev_adjacent *upper; 6649 6650 ASSERT_RTNL(); 6651 6652 if (list_empty(&dev->adj_list.upper)) 6653 return NULL; 6654 6655 upper = list_first_entry(&dev->adj_list.upper, 6656 struct netdev_adjacent, list); 6657 if (likely(upper->master)) 6658 return upper->dev; 6659 return NULL; 6660 } 6661 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6662 6663 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 6664 { 6665 struct netdev_adjacent *upper; 6666 6667 ASSERT_RTNL(); 6668 6669 if (list_empty(&dev->adj_list.upper)) 6670 return NULL; 6671 6672 upper = list_first_entry(&dev->adj_list.upper, 6673 struct netdev_adjacent, list); 6674 if (likely(upper->master) && !upper->ignore) 6675 return upper->dev; 6676 return NULL; 6677 } 6678 6679 /** 6680 * netdev_has_any_lower_dev - Check if device is linked to some device 6681 * @dev: device 6682 * 6683 * Find out if a device is linked to a lower device and return true in case 6684 * it is. The caller must hold the RTNL lock. 6685 */ 6686 static bool netdev_has_any_lower_dev(struct net_device *dev) 6687 { 6688 ASSERT_RTNL(); 6689 6690 return !list_empty(&dev->adj_list.lower); 6691 } 6692 6693 void *netdev_adjacent_get_private(struct list_head *adj_list) 6694 { 6695 struct netdev_adjacent *adj; 6696 6697 adj = list_entry(adj_list, struct netdev_adjacent, list); 6698 6699 return adj->private; 6700 } 6701 EXPORT_SYMBOL(netdev_adjacent_get_private); 6702 6703 /** 6704 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6705 * @dev: device 6706 * @iter: list_head ** of the current position 6707 * 6708 * Gets the next device from the dev's upper list, starting from iter 6709 * position. The caller must hold RCU read lock. 
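 * Most callers go through the netdev_for_each_upper_dev_rcu() helper
 * rather than calling this directly. An equivalent open-coded loop,
 * with upper and iter being caller-local and use_upper() a placeholder:
 *
 *	iter = &dev->adj_list.upper;
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		use_upper(upper);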
6710 */ 6711 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6712 struct list_head **iter) 6713 { 6714 struct netdev_adjacent *upper; 6715 6716 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6717 6718 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6719 6720 if (&upper->list == &dev->adj_list.upper) 6721 return NULL; 6722 6723 *iter = &upper->list; 6724 6725 return upper->dev; 6726 } 6727 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6728 6729 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 6730 struct list_head **iter, 6731 bool *ignore) 6732 { 6733 struct netdev_adjacent *upper; 6734 6735 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 6736 6737 if (&upper->list == &dev->adj_list.upper) 6738 return NULL; 6739 6740 *iter = &upper->list; 6741 *ignore = upper->ignore; 6742 6743 return upper->dev; 6744 } 6745 6746 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6747 struct list_head **iter) 6748 { 6749 struct netdev_adjacent *upper; 6750 6751 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6752 6753 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6754 6755 if (&upper->list == &dev->adj_list.upper) 6756 return NULL; 6757 6758 *iter = &upper->list; 6759 6760 return upper->dev; 6761 } 6762 6763 static int __netdev_walk_all_upper_dev(struct net_device *dev, 6764 int (*fn)(struct net_device *dev, 6765 struct netdev_nested_priv *priv), 6766 struct netdev_nested_priv *priv) 6767 { 6768 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6769 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6770 int ret, cur = 0; 6771 bool ignore; 6772 6773 now = dev; 6774 iter = &dev->adj_list.upper; 6775 6776 while (1) { 6777 if (now != dev) { 6778 ret = fn(now, priv); 6779 if (ret) 6780 return ret; 6781 } 6782 6783 next = NULL; 6784 while (1) { 6785 udev = __netdev_next_upper_dev(now, &iter, &ignore); 6786 if (!udev) 6787 break; 6788 if (ignore) 6789 continue; 6790 6791 next = udev; 6792 niter = &udev->adj_list.upper; 6793 dev_stack[cur] = now; 6794 iter_stack[cur++] = iter; 6795 break; 6796 } 6797 6798 if (!next) { 6799 if (!cur) 6800 return 0; 6801 next = dev_stack[--cur]; 6802 niter = iter_stack[cur]; 6803 } 6804 6805 now = next; 6806 iter = niter; 6807 } 6808 6809 return 0; 6810 } 6811 6812 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6813 int (*fn)(struct net_device *dev, 6814 struct netdev_nested_priv *priv), 6815 struct netdev_nested_priv *priv) 6816 { 6817 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6818 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6819 int ret, cur = 0; 6820 6821 now = dev; 6822 iter = &dev->adj_list.upper; 6823 6824 while (1) { 6825 if (now != dev) { 6826 ret = fn(now, priv); 6827 if (ret) 6828 return ret; 6829 } 6830 6831 next = NULL; 6832 while (1) { 6833 udev = netdev_next_upper_dev_rcu(now, &iter); 6834 if (!udev) 6835 break; 6836 6837 next = udev; 6838 niter = &udev->adj_list.upper; 6839 dev_stack[cur] = now; 6840 iter_stack[cur++] = iter; 6841 break; 6842 } 6843 6844 if (!next) { 6845 if (!cur) 6846 return 0; 6847 next = dev_stack[--cur]; 6848 niter = iter_stack[cur]; 6849 } 6850 6851 now = next; 6852 iter = niter; 6853 } 6854 6855 return 0; 6856 } 6857 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 6858 6859 static bool __netdev_has_upper_dev(struct net_device *dev, 6860 struct net_device *upper_dev) 6861 { 6862 struct 
netdev_nested_priv priv = { 6863 .flags = 0, 6864 .data = (void *)upper_dev, 6865 }; 6866 6867 ASSERT_RTNL(); 6868 6869 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 6870 &priv); 6871 } 6872 6873 /** 6874 * netdev_lower_get_next_private - Get the next ->private from the 6875 * lower neighbour list 6876 * @dev: device 6877 * @iter: list_head ** of the current position 6878 * 6879 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6880 * list, starting from iter position. The caller must hold either hold the 6881 * RTNL lock or its own locking that guarantees that the neighbour lower 6882 * list will remain unchanged. 6883 */ 6884 void *netdev_lower_get_next_private(struct net_device *dev, 6885 struct list_head **iter) 6886 { 6887 struct netdev_adjacent *lower; 6888 6889 lower = list_entry(*iter, struct netdev_adjacent, list); 6890 6891 if (&lower->list == &dev->adj_list.lower) 6892 return NULL; 6893 6894 *iter = lower->list.next; 6895 6896 return lower->private; 6897 } 6898 EXPORT_SYMBOL(netdev_lower_get_next_private); 6899 6900 /** 6901 * netdev_lower_get_next_private_rcu - Get the next ->private from the 6902 * lower neighbour list, RCU 6903 * variant 6904 * @dev: device 6905 * @iter: list_head ** of the current position 6906 * 6907 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6908 * list, starting from iter position. The caller must hold RCU read lock. 6909 */ 6910 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 6911 struct list_head **iter) 6912 { 6913 struct netdev_adjacent *lower; 6914 6915 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 6916 6917 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6918 6919 if (&lower->list == &dev->adj_list.lower) 6920 return NULL; 6921 6922 *iter = &lower->list; 6923 6924 return lower->private; 6925 } 6926 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 6927 6928 /** 6929 * netdev_lower_get_next - Get the next device from the lower neighbour 6930 * list 6931 * @dev: device 6932 * @iter: list_head ** of the current position 6933 * 6934 * Gets the next netdev_adjacent from the dev's lower neighbour 6935 * list, starting from iter position. The caller must hold RTNL lock or 6936 * its own locking that guarantees that the neighbour lower 6937 * list will remain unchanged. 
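 * Drivers normally iterate through the netdev_for_each_lower_dev()
 * helper macro, which wraps this function, rather than calling it
 * directly.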
6938 */ 6939 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 6940 { 6941 struct netdev_adjacent *lower; 6942 6943 lower = list_entry(*iter, struct netdev_adjacent, list); 6944 6945 if (&lower->list == &dev->adj_list.lower) 6946 return NULL; 6947 6948 *iter = lower->list.next; 6949 6950 return lower->dev; 6951 } 6952 EXPORT_SYMBOL(netdev_lower_get_next); 6953 6954 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 6955 struct list_head **iter) 6956 { 6957 struct netdev_adjacent *lower; 6958 6959 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 6960 6961 if (&lower->list == &dev->adj_list.lower) 6962 return NULL; 6963 6964 *iter = &lower->list; 6965 6966 return lower->dev; 6967 } 6968 6969 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 6970 struct list_head **iter, 6971 bool *ignore) 6972 { 6973 struct netdev_adjacent *lower; 6974 6975 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 6976 6977 if (&lower->list == &dev->adj_list.lower) 6978 return NULL; 6979 6980 *iter = &lower->list; 6981 *ignore = lower->ignore; 6982 6983 return lower->dev; 6984 } 6985 6986 int netdev_walk_all_lower_dev(struct net_device *dev, 6987 int (*fn)(struct net_device *dev, 6988 struct netdev_nested_priv *priv), 6989 struct netdev_nested_priv *priv) 6990 { 6991 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6992 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6993 int ret, cur = 0; 6994 6995 now = dev; 6996 iter = &dev->adj_list.lower; 6997 6998 while (1) { 6999 if (now != dev) { 7000 ret = fn(now, priv); 7001 if (ret) 7002 return ret; 7003 } 7004 7005 next = NULL; 7006 while (1) { 7007 ldev = netdev_next_lower_dev(now, &iter); 7008 if (!ldev) 7009 break; 7010 7011 next = ldev; 7012 niter = &ldev->adj_list.lower; 7013 dev_stack[cur] = now; 7014 iter_stack[cur++] = iter; 7015 break; 7016 } 7017 7018 if (!next) { 7019 if (!cur) 7020 return 0; 7021 next = dev_stack[--cur]; 7022 niter = iter_stack[cur]; 7023 } 7024 7025 now = next; 7026 iter = niter; 7027 } 7028 7029 return 0; 7030 } 7031 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7032 7033 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7034 int (*fn)(struct net_device *dev, 7035 struct netdev_nested_priv *priv), 7036 struct netdev_nested_priv *priv) 7037 { 7038 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7039 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7040 int ret, cur = 0; 7041 bool ignore; 7042 7043 now = dev; 7044 iter = &dev->adj_list.lower; 7045 7046 while (1) { 7047 if (now != dev) { 7048 ret = fn(now, priv); 7049 if (ret) 7050 return ret; 7051 } 7052 7053 next = NULL; 7054 while (1) { 7055 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7056 if (!ldev) 7057 break; 7058 if (ignore) 7059 continue; 7060 7061 next = ldev; 7062 niter = &ldev->adj_list.lower; 7063 dev_stack[cur] = now; 7064 iter_stack[cur++] = iter; 7065 break; 7066 } 7067 7068 if (!next) { 7069 if (!cur) 7070 return 0; 7071 next = dev_stack[--cur]; 7072 niter = iter_stack[cur]; 7073 } 7074 7075 now = next; 7076 iter = niter; 7077 } 7078 7079 return 0; 7080 } 7081 7082 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7083 struct list_head **iter) 7084 { 7085 struct netdev_adjacent *lower; 7086 7087 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7088 if (&lower->list == &dev->adj_list.lower) 7089 return NULL; 7090 7091 *iter = &lower->list; 7092 7093 
return lower->dev; 7094 } 7095 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7096 7097 static u8 __netdev_upper_depth(struct net_device *dev) 7098 { 7099 struct net_device *udev; 7100 struct list_head *iter; 7101 u8 max_depth = 0; 7102 bool ignore; 7103 7104 for (iter = &dev->adj_list.upper, 7105 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7106 udev; 7107 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7108 if (ignore) 7109 continue; 7110 if (max_depth < udev->upper_level) 7111 max_depth = udev->upper_level; 7112 } 7113 7114 return max_depth; 7115 } 7116 7117 static u8 __netdev_lower_depth(struct net_device *dev) 7118 { 7119 struct net_device *ldev; 7120 struct list_head *iter; 7121 u8 max_depth = 0; 7122 bool ignore; 7123 7124 for (iter = &dev->adj_list.lower, 7125 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7126 ldev; 7127 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7128 if (ignore) 7129 continue; 7130 if (max_depth < ldev->lower_level) 7131 max_depth = ldev->lower_level; 7132 } 7133 7134 return max_depth; 7135 } 7136 7137 static int __netdev_update_upper_level(struct net_device *dev, 7138 struct netdev_nested_priv *__unused) 7139 { 7140 dev->upper_level = __netdev_upper_depth(dev) + 1; 7141 return 0; 7142 } 7143 7144 static int __netdev_update_lower_level(struct net_device *dev, 7145 struct netdev_nested_priv *priv) 7146 { 7147 dev->lower_level = __netdev_lower_depth(dev) + 1; 7148 7149 #ifdef CONFIG_LOCKDEP 7150 if (!priv) 7151 return 0; 7152 7153 if (priv->flags & NESTED_SYNC_IMM) 7154 dev->nested_level = dev->lower_level - 1; 7155 if (priv->flags & NESTED_SYNC_TODO) 7156 net_unlink_todo(dev); 7157 #endif 7158 return 0; 7159 } 7160 7161 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7162 int (*fn)(struct net_device *dev, 7163 struct netdev_nested_priv *priv), 7164 struct netdev_nested_priv *priv) 7165 { 7166 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7167 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7168 int ret, cur = 0; 7169 7170 now = dev; 7171 iter = &dev->adj_list.lower; 7172 7173 while (1) { 7174 if (now != dev) { 7175 ret = fn(now, priv); 7176 if (ret) 7177 return ret; 7178 } 7179 7180 next = NULL; 7181 while (1) { 7182 ldev = netdev_next_lower_dev_rcu(now, &iter); 7183 if (!ldev) 7184 break; 7185 7186 next = ldev; 7187 niter = &ldev->adj_list.lower; 7188 dev_stack[cur] = now; 7189 iter_stack[cur++] = iter; 7190 break; 7191 } 7192 7193 if (!next) { 7194 if (!cur) 7195 return 0; 7196 next = dev_stack[--cur]; 7197 niter = iter_stack[cur]; 7198 } 7199 7200 now = next; 7201 iter = niter; 7202 } 7203 7204 return 0; 7205 } 7206 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7207 7208 /** 7209 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7210 * lower neighbour list, RCU 7211 * variant 7212 * @dev: device 7213 * 7214 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7215 * list. The caller must hold RCU read lock. 
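 * The private pointer returned here is whatever the upper device passed
 * as @upper_priv when linking (see netdev_master_upper_dev_link());
 * the bonding driver, for instance, keeps its per-slave structure there.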
7216 */ 7217 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7218 { 7219 struct netdev_adjacent *lower; 7220 7221 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7222 struct netdev_adjacent, list); 7223 if (lower) 7224 return lower->private; 7225 return NULL; 7226 } 7227 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7228 7229 /** 7230 * netdev_master_upper_dev_get_rcu - Get master upper device 7231 * @dev: device 7232 * 7233 * Find a master upper device and return pointer to it or NULL in case 7234 * it's not there. The caller must hold the RCU read lock. 7235 */ 7236 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7237 { 7238 struct netdev_adjacent *upper; 7239 7240 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7241 struct netdev_adjacent, list); 7242 if (upper && likely(upper->master)) 7243 return upper->dev; 7244 return NULL; 7245 } 7246 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7247 7248 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7249 struct net_device *adj_dev, 7250 struct list_head *dev_list) 7251 { 7252 char linkname[IFNAMSIZ+7]; 7253 7254 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7255 "upper_%s" : "lower_%s", adj_dev->name); 7256 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7257 linkname); 7258 } 7259 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7260 char *name, 7261 struct list_head *dev_list) 7262 { 7263 char linkname[IFNAMSIZ+7]; 7264 7265 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7266 "upper_%s" : "lower_%s", name); 7267 sysfs_remove_link(&(dev->dev.kobj), linkname); 7268 } 7269 7270 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7271 struct net_device *adj_dev, 7272 struct list_head *dev_list) 7273 { 7274 return (dev_list == &dev->adj_list.upper || 7275 dev_list == &dev->adj_list.lower) && 7276 net_eq(dev_net(dev), dev_net(adj_dev)); 7277 } 7278 7279 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7280 struct net_device *adj_dev, 7281 struct list_head *dev_list, 7282 void *private, bool master) 7283 { 7284 struct netdev_adjacent *adj; 7285 int ret; 7286 7287 adj = __netdev_find_adj(adj_dev, dev_list); 7288 7289 if (adj) { 7290 adj->ref_nr += 1; 7291 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7292 dev->name, adj_dev->name, adj->ref_nr); 7293 7294 return 0; 7295 } 7296 7297 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7298 if (!adj) 7299 return -ENOMEM; 7300 7301 adj->dev = adj_dev; 7302 adj->master = master; 7303 adj->ref_nr = 1; 7304 adj->private = private; 7305 adj->ignore = false; 7306 dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL); 7307 7308 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7309 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7310 7311 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7312 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7313 if (ret) 7314 goto free_adj; 7315 } 7316 7317 /* Ensure that master link is always the first item in list. 
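 * Hence the head insertion via list_add_rcu() for a master entry versus
 * list_add_tail_rcu() for everything else; netdev_master_upper_dev_get()
 * relies on this by inspecting only the first list entry.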
*/ 7318 if (master) { 7319 ret = sysfs_create_link(&(dev->dev.kobj), 7320 &(adj_dev->dev.kobj), "master"); 7321 if (ret) 7322 goto remove_symlinks; 7323 7324 list_add_rcu(&adj->list, dev_list); 7325 } else { 7326 list_add_tail_rcu(&adj->list, dev_list); 7327 } 7328 7329 return 0; 7330 7331 remove_symlinks: 7332 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7333 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7334 free_adj: 7335 dev_put_track(adj_dev, &adj->dev_tracker); 7336 kfree(adj); 7337 7338 return ret; 7339 } 7340 7341 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7342 struct net_device *adj_dev, 7343 u16 ref_nr, 7344 struct list_head *dev_list) 7345 { 7346 struct netdev_adjacent *adj; 7347 7348 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7349 dev->name, adj_dev->name, ref_nr); 7350 7351 adj = __netdev_find_adj(adj_dev, dev_list); 7352 7353 if (!adj) { 7354 pr_err("Adjacency does not exist for device %s from %s\n", 7355 dev->name, adj_dev->name); 7356 WARN_ON(1); 7357 return; 7358 } 7359 7360 if (adj->ref_nr > ref_nr) { 7361 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7362 dev->name, adj_dev->name, ref_nr, 7363 adj->ref_nr - ref_nr); 7364 adj->ref_nr -= ref_nr; 7365 return; 7366 } 7367 7368 if (adj->master) 7369 sysfs_remove_link(&(dev->dev.kobj), "master"); 7370 7371 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7372 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7373 7374 list_del_rcu(&adj->list); 7375 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7376 adj_dev->name, dev->name, adj_dev->name); 7377 dev_put_track(adj_dev, &adj->dev_tracker); 7378 kfree_rcu(adj, rcu); 7379 } 7380 7381 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7382 struct net_device *upper_dev, 7383 struct list_head *up_list, 7384 struct list_head *down_list, 7385 void *private, bool master) 7386 { 7387 int ret; 7388 7389 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7390 private, master); 7391 if (ret) 7392 return ret; 7393 7394 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7395 private, false); 7396 if (ret) { 7397 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7398 return ret; 7399 } 7400 7401 return 0; 7402 } 7403 7404 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7405 struct net_device *upper_dev, 7406 u16 ref_nr, 7407 struct list_head *up_list, 7408 struct list_head *down_list) 7409 { 7410 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7411 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7412 } 7413 7414 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7415 struct net_device *upper_dev, 7416 void *private, bool master) 7417 { 7418 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7419 &dev->adj_list.upper, 7420 &upper_dev->adj_list.lower, 7421 private, master); 7422 } 7423 7424 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7425 struct net_device *upper_dev) 7426 { 7427 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7428 &dev->adj_list.upper, 7429 &upper_dev->adj_list.lower); 7430 } 7431 7432 static int __netdev_upper_dev_link(struct net_device *dev, 7433 struct net_device *upper_dev, bool master, 7434 void *upper_priv, void *upper_info, 7435 struct netdev_nested_priv *priv, 7436 struct netlink_ext_ack *extack) 7437 { 7438 struct netdev_notifier_changeupper_info changeupper_info = { 7439 .info = { 7440 .dev = 
dev, 7441 .extack = extack, 7442 }, 7443 .upper_dev = upper_dev, 7444 .master = master, 7445 .linking = true, 7446 .upper_info = upper_info, 7447 }; 7448 struct net_device *master_dev; 7449 int ret = 0; 7450 7451 ASSERT_RTNL(); 7452 7453 if (dev == upper_dev) 7454 return -EBUSY; 7455 7456 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7457 if (__netdev_has_upper_dev(upper_dev, dev)) 7458 return -EBUSY; 7459 7460 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7461 return -EMLINK; 7462 7463 if (!master) { 7464 if (__netdev_has_upper_dev(dev, upper_dev)) 7465 return -EEXIST; 7466 } else { 7467 master_dev = __netdev_master_upper_dev_get(dev); 7468 if (master_dev) 7469 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7470 } 7471 7472 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7473 &changeupper_info.info); 7474 ret = notifier_to_errno(ret); 7475 if (ret) 7476 return ret; 7477 7478 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7479 master); 7480 if (ret) 7481 return ret; 7482 7483 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7484 &changeupper_info.info); 7485 ret = notifier_to_errno(ret); 7486 if (ret) 7487 goto rollback; 7488 7489 __netdev_update_upper_level(dev, NULL); 7490 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7491 7492 __netdev_update_lower_level(upper_dev, priv); 7493 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7494 priv); 7495 7496 return 0; 7497 7498 rollback: 7499 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7500 7501 return ret; 7502 } 7503 7504 /** 7505 * netdev_upper_dev_link - Add a link to the upper device 7506 * @dev: device 7507 * @upper_dev: new upper device 7508 * @extack: netlink extended ack 7509 * 7510 * Adds a link to device which is upper to this one. The caller must hold 7511 * the RTNL lock. On a failure a negative errno code is returned. 7512 * On success the reference counts are adjusted and the function 7513 * returns zero. 7514 */ 7515 int netdev_upper_dev_link(struct net_device *dev, 7516 struct net_device *upper_dev, 7517 struct netlink_ext_ack *extack) 7518 { 7519 struct netdev_nested_priv priv = { 7520 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7521 .data = NULL, 7522 }; 7523 7524 return __netdev_upper_dev_link(dev, upper_dev, false, 7525 NULL, NULL, &priv, extack); 7526 } 7527 EXPORT_SYMBOL(netdev_upper_dev_link); 7528 7529 /** 7530 * netdev_master_upper_dev_link - Add a master link to the upper device 7531 * @dev: device 7532 * @upper_dev: new upper device 7533 * @upper_priv: upper device private 7534 * @upper_info: upper info to be passed down via notifier 7535 * @extack: netlink extended ack 7536 * 7537 * Adds a link to device which is upper to this one. In this case, only 7538 * one master upper device can be linked, although other non-master devices 7539 * might be linked as well. The caller must hold the RTNL lock. 7540 * On a failure a negative errno code is returned. On success the reference 7541 * counts are adjusted and the function returns zero. 
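 * Illustrative enslave-style call, with every name below a placeholder
 * rather than a symbol from this file:
 *
 *	err = netdev_master_upper_dev_link(port_dev, bond_dev,
 *					   port_priv, lag_info, extack);
 *
 * where port_dev becomes a lower device of the master bond_dev.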
7542 */ 7543 int netdev_master_upper_dev_link(struct net_device *dev, 7544 struct net_device *upper_dev, 7545 void *upper_priv, void *upper_info, 7546 struct netlink_ext_ack *extack) 7547 { 7548 struct netdev_nested_priv priv = { 7549 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7550 .data = NULL, 7551 }; 7552 7553 return __netdev_upper_dev_link(dev, upper_dev, true, 7554 upper_priv, upper_info, &priv, extack); 7555 } 7556 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7557 7558 static void __netdev_upper_dev_unlink(struct net_device *dev, 7559 struct net_device *upper_dev, 7560 struct netdev_nested_priv *priv) 7561 { 7562 struct netdev_notifier_changeupper_info changeupper_info = { 7563 .info = { 7564 .dev = dev, 7565 }, 7566 .upper_dev = upper_dev, 7567 .linking = false, 7568 }; 7569 7570 ASSERT_RTNL(); 7571 7572 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7573 7574 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7575 &changeupper_info.info); 7576 7577 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7578 7579 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7580 &changeupper_info.info); 7581 7582 __netdev_update_upper_level(dev, NULL); 7583 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7584 7585 __netdev_update_lower_level(upper_dev, priv); 7586 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7587 priv); 7588 } 7589 7590 /** 7591 * netdev_upper_dev_unlink - Removes a link to upper device 7592 * @dev: device 7593 * @upper_dev: new upper device 7594 * 7595 * Removes a link to device which is upper to this one. The caller must hold 7596 * the RTNL lock. 7597 */ 7598 void netdev_upper_dev_unlink(struct net_device *dev, 7599 struct net_device *upper_dev) 7600 { 7601 struct netdev_nested_priv priv = { 7602 .flags = NESTED_SYNC_TODO, 7603 .data = NULL, 7604 }; 7605 7606 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 7607 } 7608 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7609 7610 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 7611 struct net_device *lower_dev, 7612 bool val) 7613 { 7614 struct netdev_adjacent *adj; 7615 7616 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 7617 if (adj) 7618 adj->ignore = val; 7619 7620 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 7621 if (adj) 7622 adj->ignore = val; 7623 } 7624 7625 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 7626 struct net_device *lower_dev) 7627 { 7628 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 7629 } 7630 7631 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 7632 struct net_device *lower_dev) 7633 { 7634 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 7635 } 7636 7637 int netdev_adjacent_change_prepare(struct net_device *old_dev, 7638 struct net_device *new_dev, 7639 struct net_device *dev, 7640 struct netlink_ext_ack *extack) 7641 { 7642 struct netdev_nested_priv priv = { 7643 .flags = 0, 7644 .data = NULL, 7645 }; 7646 int err; 7647 7648 if (!new_dev) 7649 return 0; 7650 7651 if (old_dev && new_dev != old_dev) 7652 netdev_adjacent_dev_disable(dev, old_dev); 7653 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 7654 extack); 7655 if (err) { 7656 if (old_dev && new_dev != old_dev) 7657 netdev_adjacent_dev_enable(dev, old_dev); 7658 return err; 7659 } 7660 7661 return 0; 7662 } 7663 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 7664 7665 void netdev_adjacent_change_commit(struct net_device *old_dev, 7666 struct 
net_device *new_dev, 7667 struct net_device *dev) 7668 { 7669 struct netdev_nested_priv priv = { 7670 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 7671 .data = NULL, 7672 }; 7673 7674 if (!new_dev || !old_dev) 7675 return; 7676 7677 if (new_dev == old_dev) 7678 return; 7679 7680 netdev_adjacent_dev_enable(dev, old_dev); 7681 __netdev_upper_dev_unlink(old_dev, dev, &priv); 7682 } 7683 EXPORT_SYMBOL(netdev_adjacent_change_commit); 7684 7685 void netdev_adjacent_change_abort(struct net_device *old_dev, 7686 struct net_device *new_dev, 7687 struct net_device *dev) 7688 { 7689 struct netdev_nested_priv priv = { 7690 .flags = 0, 7691 .data = NULL, 7692 }; 7693 7694 if (!new_dev) 7695 return; 7696 7697 if (old_dev && new_dev != old_dev) 7698 netdev_adjacent_dev_enable(dev, old_dev); 7699 7700 __netdev_upper_dev_unlink(new_dev, dev, &priv); 7701 } 7702 EXPORT_SYMBOL(netdev_adjacent_change_abort); 7703 7704 /** 7705 * netdev_bonding_info_change - Dispatch event about slave change 7706 * @dev: device 7707 * @bonding_info: info to dispatch 7708 * 7709 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7710 * The caller must hold the RTNL lock. 7711 */ 7712 void netdev_bonding_info_change(struct net_device *dev, 7713 struct netdev_bonding_info *bonding_info) 7714 { 7715 struct netdev_notifier_bonding_info info = { 7716 .info.dev = dev, 7717 }; 7718 7719 memcpy(&info.bonding_info, bonding_info, 7720 sizeof(struct netdev_bonding_info)); 7721 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7722 &info.info); 7723 } 7724 EXPORT_SYMBOL(netdev_bonding_info_change); 7725 7726 /** 7727 * netdev_get_xmit_slave - Get the xmit slave of master device 7728 * @dev: device 7729 * @skb: The packet 7730 * @all_slaves: assume all the slaves are active 7731 * 7732 * The reference counters are not incremented so the caller must be 7733 * careful with locks. The caller must hold RCU lock. 7734 * %NULL is returned if no slave is found. 7735 */ 7736 7737 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 7738 struct sk_buff *skb, 7739 bool all_slaves) 7740 { 7741 const struct net_device_ops *ops = dev->netdev_ops; 7742 7743 if (!ops->ndo_get_xmit_slave) 7744 return NULL; 7745 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 7746 } 7747 EXPORT_SYMBOL(netdev_get_xmit_slave); 7748 7749 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 7750 struct sock *sk) 7751 { 7752 const struct net_device_ops *ops = dev->netdev_ops; 7753 7754 if (!ops->ndo_sk_get_lower_dev) 7755 return NULL; 7756 return ops->ndo_sk_get_lower_dev(dev, sk); 7757 } 7758 7759 /** 7760 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 7761 * @dev: device 7762 * @sk: the socket 7763 * 7764 * %NULL is returned if no lower device is found. 
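 * The lookup follows ndo_sk_get_lower_dev() one layer at a time, a
 * bonding master picking the slave that carries @sk's traffic being the
 * typical case, until no device underneath resolves the socket further.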
7765 */ 7766 7767 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 7768 struct sock *sk) 7769 { 7770 struct net_device *lower; 7771 7772 lower = netdev_sk_get_lower_dev(dev, sk); 7773 while (lower) { 7774 dev = lower; 7775 lower = netdev_sk_get_lower_dev(dev, sk); 7776 } 7777 7778 return dev; 7779 } 7780 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 7781 7782 static void netdev_adjacent_add_links(struct net_device *dev) 7783 { 7784 struct netdev_adjacent *iter; 7785 7786 struct net *net = dev_net(dev); 7787 7788 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7789 if (!net_eq(net, dev_net(iter->dev))) 7790 continue; 7791 netdev_adjacent_sysfs_add(iter->dev, dev, 7792 &iter->dev->adj_list.lower); 7793 netdev_adjacent_sysfs_add(dev, iter->dev, 7794 &dev->adj_list.upper); 7795 } 7796 7797 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7798 if (!net_eq(net, dev_net(iter->dev))) 7799 continue; 7800 netdev_adjacent_sysfs_add(iter->dev, dev, 7801 &iter->dev->adj_list.upper); 7802 netdev_adjacent_sysfs_add(dev, iter->dev, 7803 &dev->adj_list.lower); 7804 } 7805 } 7806 7807 static void netdev_adjacent_del_links(struct net_device *dev) 7808 { 7809 struct netdev_adjacent *iter; 7810 7811 struct net *net = dev_net(dev); 7812 7813 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7814 if (!net_eq(net, dev_net(iter->dev))) 7815 continue; 7816 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7817 &iter->dev->adj_list.lower); 7818 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7819 &dev->adj_list.upper); 7820 } 7821 7822 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7823 if (!net_eq(net, dev_net(iter->dev))) 7824 continue; 7825 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7826 &iter->dev->adj_list.upper); 7827 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7828 &dev->adj_list.lower); 7829 } 7830 } 7831 7832 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 7833 { 7834 struct netdev_adjacent *iter; 7835 7836 struct net *net = dev_net(dev); 7837 7838 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7839 if (!net_eq(net, dev_net(iter->dev))) 7840 continue; 7841 netdev_adjacent_sysfs_del(iter->dev, oldname, 7842 &iter->dev->adj_list.lower); 7843 netdev_adjacent_sysfs_add(iter->dev, dev, 7844 &iter->dev->adj_list.lower); 7845 } 7846 7847 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7848 if (!net_eq(net, dev_net(iter->dev))) 7849 continue; 7850 netdev_adjacent_sysfs_del(iter->dev, oldname, 7851 &iter->dev->adj_list.upper); 7852 netdev_adjacent_sysfs_add(iter->dev, dev, 7853 &iter->dev->adj_list.upper); 7854 } 7855 } 7856 7857 void *netdev_lower_dev_get_private(struct net_device *dev, 7858 struct net_device *lower_dev) 7859 { 7860 struct netdev_adjacent *lower; 7861 7862 if (!lower_dev) 7863 return NULL; 7864 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 7865 if (!lower) 7866 return NULL; 7867 7868 return lower->private; 7869 } 7870 EXPORT_SYMBOL(netdev_lower_dev_get_private); 7871 7872 7873 /** 7874 * netdev_lower_state_changed - Dispatch event about lower device state change 7875 * @lower_dev: device 7876 * @lower_state_info: state to dispatch 7877 * 7878 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 7879 * The caller must hold the RTNL lock. 
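 * A bonding- or team-style master typically calls this when a port's
 * link state or active/backup role changes, passing a driver private
 * structure as @lower_state_info; listeners receive it wrapped in
 * struct netdev_notifier_changelowerstate_info.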
7880 */ 7881 void netdev_lower_state_changed(struct net_device *lower_dev, 7882 void *lower_state_info) 7883 { 7884 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 7885 .info.dev = lower_dev, 7886 }; 7887 7888 ASSERT_RTNL(); 7889 changelowerstate_info.lower_state_info = lower_state_info; 7890 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 7891 &changelowerstate_info.info); 7892 } 7893 EXPORT_SYMBOL(netdev_lower_state_changed); 7894 7895 static void dev_change_rx_flags(struct net_device *dev, int flags) 7896 { 7897 const struct net_device_ops *ops = dev->netdev_ops; 7898 7899 if (ops->ndo_change_rx_flags) 7900 ops->ndo_change_rx_flags(dev, flags); 7901 } 7902 7903 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 7904 { 7905 unsigned int old_flags = dev->flags; 7906 kuid_t uid; 7907 kgid_t gid; 7908 7909 ASSERT_RTNL(); 7910 7911 dev->flags |= IFF_PROMISC; 7912 dev->promiscuity += inc; 7913 if (dev->promiscuity == 0) { 7914 /* 7915 * Avoid overflow. 7916 * If inc causes overflow, untouch promisc and return error. 7917 */ 7918 if (inc < 0) 7919 dev->flags &= ~IFF_PROMISC; 7920 else { 7921 dev->promiscuity -= inc; 7922 netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); 7923 return -EOVERFLOW; 7924 } 7925 } 7926 if (dev->flags != old_flags) { 7927 pr_info("device %s %s promiscuous mode\n", 7928 dev->name, 7929 dev->flags & IFF_PROMISC ? "entered" : "left"); 7930 if (audit_enabled) { 7931 current_uid_gid(&uid, &gid); 7932 audit_log(audit_context(), GFP_ATOMIC, 7933 AUDIT_ANOM_PROMISCUOUS, 7934 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 7935 dev->name, (dev->flags & IFF_PROMISC), 7936 (old_flags & IFF_PROMISC), 7937 from_kuid(&init_user_ns, audit_get_loginuid(current)), 7938 from_kuid(&init_user_ns, uid), 7939 from_kgid(&init_user_ns, gid), 7940 audit_get_sessionid(current)); 7941 } 7942 7943 dev_change_rx_flags(dev, IFF_PROMISC); 7944 } 7945 if (notify) 7946 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 7947 return 0; 7948 } 7949 7950 /** 7951 * dev_set_promiscuity - update promiscuity count on a device 7952 * @dev: device 7953 * @inc: modifier 7954 * 7955 * Add or remove promiscuity from a device. While the count in the device 7956 * remains above zero the interface remains promiscuous. Once it hits zero 7957 * the device reverts back to normal filtering operation. A negative inc 7958 * value is used to drop promiscuity on the device. 7959 * Return 0 if successful or a negative errno code on error. 7960 */ 7961 int dev_set_promiscuity(struct net_device *dev, int inc) 7962 { 7963 unsigned int old_flags = dev->flags; 7964 int err; 7965 7966 err = __dev_set_promiscuity(dev, inc, true); 7967 if (err < 0) 7968 return err; 7969 if (dev->flags != old_flags) 7970 dev_set_rx_mode(dev); 7971 return err; 7972 } 7973 EXPORT_SYMBOL(dev_set_promiscuity); 7974 7975 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 7976 { 7977 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 7978 7979 ASSERT_RTNL(); 7980 7981 dev->flags |= IFF_ALLMULTI; 7982 dev->allmulti += inc; 7983 if (dev->allmulti == 0) { 7984 /* 7985 * Avoid overflow. 7986 * If inc causes overflow, untouch allmulti and return error. 7987 */ 7988 if (inc < 0) 7989 dev->flags &= ~IFF_ALLMULTI; 7990 else { 7991 dev->allmulti -= inc; 7992 netdev_warn(dev, "allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n"); 7993 return -EOVERFLOW; 7994 } 7995 } 7996 if (dev->flags ^ old_flags) { 7997 dev_change_rx_flags(dev, IFF_ALLMULTI); 7998 dev_set_rx_mode(dev); 7999 if (notify) 8000 __dev_notify_flags(dev, old_flags, 8001 dev->gflags ^ old_gflags); 8002 } 8003 return 0; 8004 } 8005 8006 /** 8007 * dev_set_allmulti - update allmulti count on a device 8008 * @dev: device 8009 * @inc: modifier 8010 * 8011 * Add or remove reception of all multicast frames to a device. While the 8012 * count in the device remains above zero the interface remains listening 8013 * to all interfaces. Once it hits zero the device reverts back to normal 8014 * filtering operation. A negative @inc value is used to drop the counter 8015 * when releasing a resource needing all multicasts. 8016 * Return 0 if successful or a negative errno code on error. 8017 */ 8018 8019 int dev_set_allmulti(struct net_device *dev, int inc) 8020 { 8021 return __dev_set_allmulti(dev, inc, true); 8022 } 8023 EXPORT_SYMBOL(dev_set_allmulti); 8024 8025 /* 8026 * Upload unicast and multicast address lists to device and 8027 * configure RX filtering. When the device doesn't support unicast 8028 * filtering it is put in promiscuous mode while unicast addresses 8029 * are present. 8030 */ 8031 void __dev_set_rx_mode(struct net_device *dev) 8032 { 8033 const struct net_device_ops *ops = dev->netdev_ops; 8034 8035 /* dev_open will call this function so the list will stay sane. */ 8036 if (!(dev->flags&IFF_UP)) 8037 return; 8038 8039 if (!netif_device_present(dev)) 8040 return; 8041 8042 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8043 /* Unicast addresses changes may only happen under the rtnl, 8044 * therefore calling __dev_set_promiscuity here is safe. 8045 */ 8046 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8047 __dev_set_promiscuity(dev, 1, false); 8048 dev->uc_promisc = true; 8049 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8050 __dev_set_promiscuity(dev, -1, false); 8051 dev->uc_promisc = false; 8052 } 8053 } 8054 8055 if (ops->ndo_set_rx_mode) 8056 ops->ndo_set_rx_mode(dev); 8057 } 8058 8059 void dev_set_rx_mode(struct net_device *dev) 8060 { 8061 netif_addr_lock_bh(dev); 8062 __dev_set_rx_mode(dev); 8063 netif_addr_unlock_bh(dev); 8064 } 8065 8066 /** 8067 * dev_get_flags - get flags reported to userspace 8068 * @dev: device 8069 * 8070 * Get the combination of flag bits exported through APIs to userspace. 8071 */ 8072 unsigned int dev_get_flags(const struct net_device *dev) 8073 { 8074 unsigned int flags; 8075 8076 flags = (dev->flags & ~(IFF_PROMISC | 8077 IFF_ALLMULTI | 8078 IFF_RUNNING | 8079 IFF_LOWER_UP | 8080 IFF_DORMANT)) | 8081 (dev->gflags & (IFF_PROMISC | 8082 IFF_ALLMULTI)); 8083 8084 if (netif_running(dev)) { 8085 if (netif_oper_up(dev)) 8086 flags |= IFF_RUNNING; 8087 if (netif_carrier_ok(dev)) 8088 flags |= IFF_LOWER_UP; 8089 if (netif_dormant(dev)) 8090 flags |= IFF_DORMANT; 8091 } 8092 8093 return flags; 8094 } 8095 EXPORT_SYMBOL(dev_get_flags); 8096 8097 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8098 struct netlink_ext_ack *extack) 8099 { 8100 unsigned int old_flags = dev->flags; 8101 int ret; 8102 8103 ASSERT_RTNL(); 8104 8105 /* 8106 * Set the flags on our device. 
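 * Only the first mask below may be set directly from userspace;
 * IFF_UP, IFF_PROMISC and IFF_ALLMULTI are preserved here and handled
 * explicitly further down (device open/close and the gflags counters),
 * while IFF_VOLATILE is simply carried over.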
8107 */ 8108 8109 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8110 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8111 IFF_AUTOMEDIA)) | 8112 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8113 IFF_ALLMULTI)); 8114 8115 /* 8116 * Load in the correct multicast list now the flags have changed. 8117 */ 8118 8119 if ((old_flags ^ flags) & IFF_MULTICAST) 8120 dev_change_rx_flags(dev, IFF_MULTICAST); 8121 8122 dev_set_rx_mode(dev); 8123 8124 /* 8125 * Have we downed the interface. We handle IFF_UP ourselves 8126 * according to user attempts to set it, rather than blindly 8127 * setting it. 8128 */ 8129 8130 ret = 0; 8131 if ((old_flags ^ flags) & IFF_UP) { 8132 if (old_flags & IFF_UP) 8133 __dev_close(dev); 8134 else 8135 ret = __dev_open(dev, extack); 8136 } 8137 8138 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8139 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8140 unsigned int old_flags = dev->flags; 8141 8142 dev->gflags ^= IFF_PROMISC; 8143 8144 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8145 if (dev->flags != old_flags) 8146 dev_set_rx_mode(dev); 8147 } 8148 8149 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8150 * is important. Some (broken) drivers set IFF_PROMISC, when 8151 * IFF_ALLMULTI is requested not asking us and not reporting. 8152 */ 8153 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8154 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8155 8156 dev->gflags ^= IFF_ALLMULTI; 8157 __dev_set_allmulti(dev, inc, false); 8158 } 8159 8160 return ret; 8161 } 8162 8163 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8164 unsigned int gchanges) 8165 { 8166 unsigned int changes = dev->flags ^ old_flags; 8167 8168 if (gchanges) 8169 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 8170 8171 if (changes & IFF_UP) { 8172 if (dev->flags & IFF_UP) 8173 call_netdevice_notifiers(NETDEV_UP, dev); 8174 else 8175 call_netdevice_notifiers(NETDEV_DOWN, dev); 8176 } 8177 8178 if (dev->flags & IFF_UP && 8179 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8180 struct netdev_notifier_change_info change_info = { 8181 .info = { 8182 .dev = dev, 8183 }, 8184 .flags_changed = changes, 8185 }; 8186 8187 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8188 } 8189 } 8190 8191 /** 8192 * dev_change_flags - change device settings 8193 * @dev: device 8194 * @flags: device state flags 8195 * @extack: netlink extended ack 8196 * 8197 * Change settings on device based state flags. The flags are 8198 * in the userspace exported format. 
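 * Both the SIOCSIFFLAGS ioctl and rtnetlink link changes funnel into
 * this helper. Bringing an interface up, for instance, boils down to
 * an RTNL-protected call along the lines of
 * dev_change_flags(dev, dev->flags | IFF_UP, extack).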
8199 */ 8200 int dev_change_flags(struct net_device *dev, unsigned int flags, 8201 struct netlink_ext_ack *extack) 8202 { 8203 int ret; 8204 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8205 8206 ret = __dev_change_flags(dev, flags, extack); 8207 if (ret < 0) 8208 return ret; 8209 8210 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8211 __dev_notify_flags(dev, old_flags, changes); 8212 return ret; 8213 } 8214 EXPORT_SYMBOL(dev_change_flags); 8215 8216 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8217 { 8218 const struct net_device_ops *ops = dev->netdev_ops; 8219 8220 if (ops->ndo_change_mtu) 8221 return ops->ndo_change_mtu(dev, new_mtu); 8222 8223 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8224 WRITE_ONCE(dev->mtu, new_mtu); 8225 return 0; 8226 } 8227 EXPORT_SYMBOL(__dev_set_mtu); 8228 8229 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8230 struct netlink_ext_ack *extack) 8231 { 8232 /* MTU must be positive, and in range */ 8233 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8234 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8235 return -EINVAL; 8236 } 8237 8238 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8239 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8240 return -EINVAL; 8241 } 8242 return 0; 8243 } 8244 8245 /** 8246 * dev_set_mtu_ext - Change maximum transfer unit 8247 * @dev: device 8248 * @new_mtu: new transfer unit 8249 * @extack: netlink extended ack 8250 * 8251 * Change the maximum transfer size of the network device. 8252 */ 8253 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8254 struct netlink_ext_ack *extack) 8255 { 8256 int err, orig_mtu; 8257 8258 if (new_mtu == dev->mtu) 8259 return 0; 8260 8261 err = dev_validate_mtu(dev, new_mtu, extack); 8262 if (err) 8263 return err; 8264 8265 if (!netif_device_present(dev)) 8266 return -ENODEV; 8267 8268 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8269 err = notifier_to_errno(err); 8270 if (err) 8271 return err; 8272 8273 orig_mtu = dev->mtu; 8274 err = __dev_set_mtu(dev, new_mtu); 8275 8276 if (!err) { 8277 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8278 orig_mtu); 8279 err = notifier_to_errno(err); 8280 if (err) { 8281 /* setting mtu back and notifying everyone again, 8282 * so that they have a chance to revert changes. 
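	 * (for example, a driver or protocol that already resized its
	 * buffers for the new MTU can size them back when it sees the
	 * second NETDEV_CHANGEMTU).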
8283 */ 8284 __dev_set_mtu(dev, orig_mtu); 8285 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8286 new_mtu); 8287 } 8288 } 8289 return err; 8290 } 8291 8292 int dev_set_mtu(struct net_device *dev, int new_mtu) 8293 { 8294 struct netlink_ext_ack extack; 8295 int err; 8296 8297 memset(&extack, 0, sizeof(extack)); 8298 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8299 if (err && extack._msg) 8300 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8301 return err; 8302 } 8303 EXPORT_SYMBOL(dev_set_mtu); 8304 8305 /** 8306 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8307 * @dev: device 8308 * @new_len: new tx queue length 8309 */ 8310 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8311 { 8312 unsigned int orig_len = dev->tx_queue_len; 8313 int res; 8314 8315 if (new_len != (unsigned int)new_len) 8316 return -ERANGE; 8317 8318 if (new_len != orig_len) { 8319 dev->tx_queue_len = new_len; 8320 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8321 res = notifier_to_errno(res); 8322 if (res) 8323 goto err_rollback; 8324 res = dev_qdisc_change_tx_queue_len(dev); 8325 if (res) 8326 goto err_rollback; 8327 } 8328 8329 return 0; 8330 8331 err_rollback: 8332 netdev_err(dev, "refused to change device tx_queue_len\n"); 8333 dev->tx_queue_len = orig_len; 8334 return res; 8335 } 8336 8337 /** 8338 * dev_set_group - Change group this device belongs to 8339 * @dev: device 8340 * @new_group: group this device should belong to 8341 */ 8342 void dev_set_group(struct net_device *dev, int new_group) 8343 { 8344 dev->group = new_group; 8345 } 8346 EXPORT_SYMBOL(dev_set_group); 8347 8348 /** 8349 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8350 * @dev: device 8351 * @addr: new address 8352 * @extack: netlink extended ack 8353 */ 8354 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8355 struct netlink_ext_ack *extack) 8356 { 8357 struct netdev_notifier_pre_changeaddr_info info = { 8358 .info.dev = dev, 8359 .info.extack = extack, 8360 .dev_addr = addr, 8361 }; 8362 int rc; 8363 8364 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8365 return notifier_to_errno(rc); 8366 } 8367 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8368 8369 /** 8370 * dev_set_mac_address - Change Media Access Control Address 8371 * @dev: device 8372 * @sa: new address 8373 * @extack: netlink extended ack 8374 * 8375 * Change the hardware (MAC) address of the device 8376 */ 8377 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8378 struct netlink_ext_ack *extack) 8379 { 8380 const struct net_device_ops *ops = dev->netdev_ops; 8381 int err; 8382 8383 if (!ops->ndo_set_mac_address) 8384 return -EOPNOTSUPP; 8385 if (sa->sa_family != dev->type) 8386 return -EINVAL; 8387 if (!netif_device_present(dev)) 8388 return -ENODEV; 8389 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8390 if (err) 8391 return err; 8392 err = ops->ndo_set_mac_address(dev, sa); 8393 if (err) 8394 return err; 8395 dev->addr_assign_type = NET_ADDR_SET; 8396 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8397 add_device_randomness(dev->dev_addr, dev->addr_len); 8398 return 0; 8399 } 8400 EXPORT_SYMBOL(dev_set_mac_address); 8401 8402 static DECLARE_RWSEM(dev_addr_sem); 8403 8404 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 8405 struct netlink_ext_ack *extack) 8406 { 8407 int ret; 8408 8409 down_write(&dev_addr_sem); 8410 ret = dev_set_mac_address(dev, sa, extack); 8411 
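	/* Readers in dev_get_mac_address() are held off by dev_addr_sem
	 * until we drop it, so they never observe a partially written
	 * address.
	 */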
up_write(&dev_addr_sem); 8412 return ret; 8413 } 8414 EXPORT_SYMBOL(dev_set_mac_address_user); 8415 8416 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 8417 { 8418 size_t size = sizeof(sa->sa_data); 8419 struct net_device *dev; 8420 int ret = 0; 8421 8422 down_read(&dev_addr_sem); 8423 rcu_read_lock(); 8424 8425 dev = dev_get_by_name_rcu(net, dev_name); 8426 if (!dev) { 8427 ret = -ENODEV; 8428 goto unlock; 8429 } 8430 if (!dev->addr_len) 8431 memset(sa->sa_data, 0, size); 8432 else 8433 memcpy(sa->sa_data, dev->dev_addr, 8434 min_t(size_t, size, dev->addr_len)); 8435 sa->sa_family = dev->type; 8436 8437 unlock: 8438 rcu_read_unlock(); 8439 up_read(&dev_addr_sem); 8440 return ret; 8441 } 8442 EXPORT_SYMBOL(dev_get_mac_address); 8443 8444 /** 8445 * dev_change_carrier - Change device carrier 8446 * @dev: device 8447 * @new_carrier: new value 8448 * 8449 * Change device carrier 8450 */ 8451 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8452 { 8453 const struct net_device_ops *ops = dev->netdev_ops; 8454 8455 if (!ops->ndo_change_carrier) 8456 return -EOPNOTSUPP; 8457 if (!netif_device_present(dev)) 8458 return -ENODEV; 8459 return ops->ndo_change_carrier(dev, new_carrier); 8460 } 8461 EXPORT_SYMBOL(dev_change_carrier); 8462 8463 /** 8464 * dev_get_phys_port_id - Get device physical port ID 8465 * @dev: device 8466 * @ppid: port ID 8467 * 8468 * Get device physical port ID 8469 */ 8470 int dev_get_phys_port_id(struct net_device *dev, 8471 struct netdev_phys_item_id *ppid) 8472 { 8473 const struct net_device_ops *ops = dev->netdev_ops; 8474 8475 if (!ops->ndo_get_phys_port_id) 8476 return -EOPNOTSUPP; 8477 return ops->ndo_get_phys_port_id(dev, ppid); 8478 } 8479 EXPORT_SYMBOL(dev_get_phys_port_id); 8480 8481 /** 8482 * dev_get_phys_port_name - Get device physical port name 8483 * @dev: device 8484 * @name: port name 8485 * @len: limit of bytes to copy to name 8486 * 8487 * Get device physical port name 8488 */ 8489 int dev_get_phys_port_name(struct net_device *dev, 8490 char *name, size_t len) 8491 { 8492 const struct net_device_ops *ops = dev->netdev_ops; 8493 int err; 8494 8495 if (ops->ndo_get_phys_port_name) { 8496 err = ops->ndo_get_phys_port_name(dev, name, len); 8497 if (err != -EOPNOTSUPP) 8498 return err; 8499 } 8500 return devlink_compat_phys_port_name_get(dev, name, len); 8501 } 8502 EXPORT_SYMBOL(dev_get_phys_port_name); 8503 8504 /** 8505 * dev_get_port_parent_id - Get the device's port parent identifier 8506 * @dev: network device 8507 * @ppid: pointer to a storage for the port's parent identifier 8508 * @recurse: allow/disallow recursion to lower devices 8509 * 8510 * Get the devices's port parent identifier 8511 */ 8512 int dev_get_port_parent_id(struct net_device *dev, 8513 struct netdev_phys_item_id *ppid, 8514 bool recurse) 8515 { 8516 const struct net_device_ops *ops = dev->netdev_ops; 8517 struct netdev_phys_item_id first = { }; 8518 struct net_device *lower_dev; 8519 struct list_head *iter; 8520 int err; 8521 8522 if (ops->ndo_get_port_parent_id) { 8523 err = ops->ndo_get_port_parent_id(dev, ppid); 8524 if (err != -EOPNOTSUPP) 8525 return err; 8526 } 8527 8528 err = devlink_compat_switch_id_get(dev, ppid); 8529 if (!recurse || err != -EOPNOTSUPP) 8530 return err; 8531 8532 netdev_for_each_lower_dev(dev, lower_dev, iter) { 8533 err = dev_get_port_parent_id(lower_dev, ppid, true); 8534 if (err) 8535 break; 8536 if (!first.id_len) 8537 first = *ppid; 8538 else if (memcmp(&first, ppid, sizeof(*ppid))) 8539 return -EOPNOTSUPP; 
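		/* All lower devices must agree on the parent ID for the
		 * aggregated answer to be meaningful.
		 */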
8540 } 8541 8542 return err; 8543 } 8544 EXPORT_SYMBOL(dev_get_port_parent_id); 8545 8546 /** 8547 * netdev_port_same_parent_id - Indicate if two network devices have 8548 * the same port parent identifier 8549 * @a: first network device 8550 * @b: second network device 8551 */ 8552 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 8553 { 8554 struct netdev_phys_item_id a_id = { }; 8555 struct netdev_phys_item_id b_id = { }; 8556 8557 if (dev_get_port_parent_id(a, &a_id, true) || 8558 dev_get_port_parent_id(b, &b_id, true)) 8559 return false; 8560 8561 return netdev_phys_item_id_same(&a_id, &b_id); 8562 } 8563 EXPORT_SYMBOL(netdev_port_same_parent_id); 8564 8565 /** 8566 * dev_change_proto_down - set carrier according to proto_down. 8567 * 8568 * @dev: device 8569 * @proto_down: new value 8570 */ 8571 int dev_change_proto_down(struct net_device *dev, bool proto_down) 8572 { 8573 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) 8574 return -EOPNOTSUPP; 8575 if (!netif_device_present(dev)) 8576 return -ENODEV; 8577 if (proto_down) 8578 netif_carrier_off(dev); 8579 else 8580 netif_carrier_on(dev); 8581 dev->proto_down = proto_down; 8582 return 0; 8583 } 8584 EXPORT_SYMBOL(dev_change_proto_down); 8585 8586 /** 8587 * dev_change_proto_down_reason - proto down reason 8588 * 8589 * @dev: device 8590 * @mask: proto down mask 8591 * @value: proto down value 8592 */ 8593 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 8594 u32 value) 8595 { 8596 int b; 8597 8598 if (!mask) { 8599 dev->proto_down_reason = value; 8600 } else { 8601 for_each_set_bit(b, &mask, 32) { 8602 if (value & (1 << b)) 8603 dev->proto_down_reason |= BIT(b); 8604 else 8605 dev->proto_down_reason &= ~BIT(b); 8606 } 8607 } 8608 } 8609 EXPORT_SYMBOL(dev_change_proto_down_reason); 8610 8611 struct bpf_xdp_link { 8612 struct bpf_link link; 8613 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 8614 int flags; 8615 }; 8616 8617 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 8618 { 8619 if (flags & XDP_FLAGS_HW_MODE) 8620 return XDP_MODE_HW; 8621 if (flags & XDP_FLAGS_DRV_MODE) 8622 return XDP_MODE_DRV; 8623 if (flags & XDP_FLAGS_SKB_MODE) 8624 return XDP_MODE_SKB; 8625 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; 8626 } 8627 8628 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 8629 { 8630 switch (mode) { 8631 case XDP_MODE_SKB: 8632 return generic_xdp_install; 8633 case XDP_MODE_DRV: 8634 case XDP_MODE_HW: 8635 return dev->netdev_ops->ndo_bpf; 8636 default: 8637 return NULL; 8638 } 8639 } 8640 8641 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 8642 enum bpf_xdp_mode mode) 8643 { 8644 return dev->xdp_state[mode].link; 8645 } 8646 8647 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 8648 enum bpf_xdp_mode mode) 8649 { 8650 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 8651 8652 if (link) 8653 return link->link.prog; 8654 return dev->xdp_state[mode].prog; 8655 } 8656 8657 u8 dev_xdp_prog_count(struct net_device *dev) 8658 { 8659 u8 count = 0; 8660 int i; 8661 8662 for (i = 0; i < __MAX_XDP_MODE; i++) 8663 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 8664 count++; 8665 return count; 8666 } 8667 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 8668 8669 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 8670 { 8671 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 8672 8673 return prog ? 
prog->aux->id : 0; 8674 } 8675 8676 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 8677 struct bpf_xdp_link *link) 8678 { 8679 dev->xdp_state[mode].link = link; 8680 dev->xdp_state[mode].prog = NULL; 8681 } 8682 8683 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 8684 struct bpf_prog *prog) 8685 { 8686 dev->xdp_state[mode].link = NULL; 8687 dev->xdp_state[mode].prog = prog; 8688 } 8689 8690 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 8691 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 8692 u32 flags, struct bpf_prog *prog) 8693 { 8694 struct netdev_bpf xdp; 8695 int err; 8696 8697 memset(&xdp, 0, sizeof(xdp)); 8698 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 8699 xdp.extack = extack; 8700 xdp.flags = flags; 8701 xdp.prog = prog; 8702 8703 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 8704 * "moved" into driver), so they don't increment it on their own, but 8705 * they do decrement refcnt when program is detached or replaced. 8706 * Given net_device also owns link/prog, we need to bump refcnt here 8707 * to prevent drivers from underflowing it. 8708 */ 8709 if (prog) 8710 bpf_prog_inc(prog); 8711 err = bpf_op(dev, &xdp); 8712 if (err) { 8713 if (prog) 8714 bpf_prog_put(prog); 8715 return err; 8716 } 8717 8718 if (mode != XDP_MODE_HW) 8719 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 8720 8721 return 0; 8722 } 8723 8724 static void dev_xdp_uninstall(struct net_device *dev) 8725 { 8726 struct bpf_xdp_link *link; 8727 struct bpf_prog *prog; 8728 enum bpf_xdp_mode mode; 8729 bpf_op_t bpf_op; 8730 8731 ASSERT_RTNL(); 8732 8733 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 8734 prog = dev_xdp_prog(dev, mode); 8735 if (!prog) 8736 continue; 8737 8738 bpf_op = dev_xdp_bpf_op(dev, mode); 8739 if (!bpf_op) 8740 continue; 8741 8742 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 8743 8744 /* auto-detach link from net device */ 8745 link = dev_xdp_link(dev, mode); 8746 if (link) 8747 link->dev = NULL; 8748 else 8749 bpf_prog_put(prog); 8750 8751 dev_xdp_set_link(dev, mode, NULL); 8752 } 8753 } 8754 8755 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 8756 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 8757 struct bpf_prog *old_prog, u32 flags) 8758 { 8759 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 8760 struct bpf_prog *cur_prog; 8761 struct net_device *upper; 8762 struct list_head *iter; 8763 enum bpf_xdp_mode mode; 8764 bpf_op_t bpf_op; 8765 int err; 8766 8767 ASSERT_RTNL(); 8768 8769 /* either link or prog attachment, never both */ 8770 if (link && (new_prog || old_prog)) 8771 return -EINVAL; 8772 /* link supports only XDP mode flags */ 8773 if (link && (flags & ~XDP_FLAGS_MODES)) { 8774 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 8775 return -EINVAL; 8776 } 8777 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 8778 if (num_modes > 1) { 8779 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 8780 return -EINVAL; 8781 } 8782 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 8783 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 8784 NL_SET_ERR_MSG(extack, 8785 "More than one program loaded, unset mode is ambiguous"); 8786 return -EINVAL; 8787 } 8788 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 8789 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 8790 NL_SET_ERR_MSG(extack, 
"XDP_FLAGS_REPLACE is not specified"); 8791 return -EINVAL; 8792 } 8793 8794 mode = dev_xdp_mode(dev, flags); 8795 /* can't replace attached link */ 8796 if (dev_xdp_link(dev, mode)) { 8797 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 8798 return -EBUSY; 8799 } 8800 8801 /* don't allow if an upper device already has a program */ 8802 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 8803 if (dev_xdp_prog_count(upper) > 0) { 8804 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 8805 return -EEXIST; 8806 } 8807 } 8808 8809 cur_prog = dev_xdp_prog(dev, mode); 8810 /* can't replace attached prog with link */ 8811 if (link && cur_prog) { 8812 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 8813 return -EBUSY; 8814 } 8815 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 8816 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 8817 return -EEXIST; 8818 } 8819 8820 /* put effective new program into new_prog */ 8821 if (link) 8822 new_prog = link->link.prog; 8823 8824 if (new_prog) { 8825 bool offload = mode == XDP_MODE_HW; 8826 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 8827 ? XDP_MODE_DRV : XDP_MODE_SKB; 8828 8829 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 8830 NL_SET_ERR_MSG(extack, "XDP program already attached"); 8831 return -EBUSY; 8832 } 8833 if (!offload && dev_xdp_prog(dev, other_mode)) { 8834 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 8835 return -EEXIST; 8836 } 8837 if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) { 8838 NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); 8839 return -EINVAL; 8840 } 8841 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 8842 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 8843 return -EINVAL; 8844 } 8845 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 8846 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 8847 return -EINVAL; 8848 } 8849 } 8850 8851 /* don't call drivers if the effective program didn't change */ 8852 if (new_prog != cur_prog) { 8853 bpf_op = dev_xdp_bpf_op(dev, mode); 8854 if (!bpf_op) { 8855 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 8856 return -EOPNOTSUPP; 8857 } 8858 8859 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 8860 if (err) 8861 return err; 8862 } 8863 8864 if (link) 8865 dev_xdp_set_link(dev, mode, link); 8866 else 8867 dev_xdp_set_prog(dev, mode, new_prog); 8868 if (cur_prog) 8869 bpf_prog_put(cur_prog); 8870 8871 return 0; 8872 } 8873 8874 static int dev_xdp_attach_link(struct net_device *dev, 8875 struct netlink_ext_ack *extack, 8876 struct bpf_xdp_link *link) 8877 { 8878 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 8879 } 8880 8881 static int dev_xdp_detach_link(struct net_device *dev, 8882 struct netlink_ext_ack *extack, 8883 struct bpf_xdp_link *link) 8884 { 8885 enum bpf_xdp_mode mode; 8886 bpf_op_t bpf_op; 8887 8888 ASSERT_RTNL(); 8889 8890 mode = dev_xdp_mode(dev, link->flags); 8891 if (dev_xdp_link(dev, mode) != link) 8892 return -EINVAL; 8893 8894 bpf_op = dev_xdp_bpf_op(dev, mode); 8895 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 8896 dev_xdp_set_link(dev, mode, NULL); 8897 return 0; 8898 } 8899 8900 static void bpf_xdp_link_release(struct bpf_link *link) 8901 { 8902 struct bpf_xdp_link *xdp_link = container_of(link, struct 
bpf_xdp_link, link); 8903 8904 rtnl_lock(); 8905 8906 /* if racing with net_device's tear down, xdp_link->dev might be 8907 * already NULL, in which case link was already auto-detached 8908 */ 8909 if (xdp_link->dev) { 8910 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 8911 xdp_link->dev = NULL; 8912 } 8913 8914 rtnl_unlock(); 8915 } 8916 8917 static int bpf_xdp_link_detach(struct bpf_link *link) 8918 { 8919 bpf_xdp_link_release(link); 8920 return 0; 8921 } 8922 8923 static void bpf_xdp_link_dealloc(struct bpf_link *link) 8924 { 8925 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 8926 8927 kfree(xdp_link); 8928 } 8929 8930 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 8931 struct seq_file *seq) 8932 { 8933 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 8934 u32 ifindex = 0; 8935 8936 rtnl_lock(); 8937 if (xdp_link->dev) 8938 ifindex = xdp_link->dev->ifindex; 8939 rtnl_unlock(); 8940 8941 seq_printf(seq, "ifindex:\t%u\n", ifindex); 8942 } 8943 8944 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 8945 struct bpf_link_info *info) 8946 { 8947 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 8948 u32 ifindex = 0; 8949 8950 rtnl_lock(); 8951 if (xdp_link->dev) 8952 ifindex = xdp_link->dev->ifindex; 8953 rtnl_unlock(); 8954 8955 info->xdp.ifindex = ifindex; 8956 return 0; 8957 } 8958 8959 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 8960 struct bpf_prog *old_prog) 8961 { 8962 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 8963 enum bpf_xdp_mode mode; 8964 bpf_op_t bpf_op; 8965 int err = 0; 8966 8967 rtnl_lock(); 8968 8969 /* link might have been auto-released already, so fail */ 8970 if (!xdp_link->dev) { 8971 err = -ENOLINK; 8972 goto out_unlock; 8973 } 8974 8975 if (old_prog && link->prog != old_prog) { 8976 err = -EPERM; 8977 goto out_unlock; 8978 } 8979 old_prog = link->prog; 8980 if (old_prog->type != new_prog->type || 8981 old_prog->expected_attach_type != new_prog->expected_attach_type) { 8982 err = -EINVAL; 8983 goto out_unlock; 8984 } 8985 8986 if (old_prog == new_prog) { 8987 /* no-op, don't disturb drivers */ 8988 bpf_prog_put(new_prog); 8989 goto out_unlock; 8990 } 8991 8992 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 8993 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 8994 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 8995 xdp_link->flags, new_prog); 8996 if (err) 8997 goto out_unlock; 8998 8999 old_prog = xchg(&link->prog, new_prog); 9000 bpf_prog_put(old_prog); 9001 9002 out_unlock: 9003 rtnl_unlock(); 9004 return err; 9005 } 9006 9007 static const struct bpf_link_ops bpf_xdp_link_lops = { 9008 .release = bpf_xdp_link_release, 9009 .dealloc = bpf_xdp_link_dealloc, 9010 .detach = bpf_xdp_link_detach, 9011 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9012 .fill_link_info = bpf_xdp_link_fill_link_info, 9013 .update_prog = bpf_xdp_link_update, 9014 }; 9015 9016 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9017 { 9018 struct net *net = current->nsproxy->net_ns; 9019 struct bpf_link_primer link_primer; 9020 struct bpf_xdp_link *link; 9021 struct net_device *dev; 9022 int err, fd; 9023 9024 rtnl_lock(); 9025 dev = dev_get_by_index(net, attr->link_create.target_ifindex); 9026 if (!dev) { 9027 rtnl_unlock(); 9028 return -EINVAL; 9029 } 9030 9031 link = kzalloc(sizeof(*link), GFP_USER); 9032 if (!link) { 9033 err = -ENOMEM; 9034 goto 
unlock; 9035 } 9036 9037 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9038 link->dev = dev; 9039 link->flags = attr->link_create.flags; 9040 9041 err = bpf_link_prime(&link->link, &link_primer); 9042 if (err) { 9043 kfree(link); 9044 goto unlock; 9045 } 9046 9047 err = dev_xdp_attach_link(dev, NULL, link); 9048 rtnl_unlock(); 9049 9050 if (err) { 9051 link->dev = NULL; 9052 bpf_link_cleanup(&link_primer); 9053 goto out_put_dev; 9054 } 9055 9056 fd = bpf_link_settle(&link_primer); 9057 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9058 dev_put(dev); 9059 return fd; 9060 9061 unlock: 9062 rtnl_unlock(); 9063 9064 out_put_dev: 9065 dev_put(dev); 9066 return err; 9067 } 9068 9069 /** 9070 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9071 * @dev: device 9072 * @extack: netlink extended ack 9073 * @fd: new program fd or negative value to clear 9074 * @expected_fd: old program fd that userspace expects to replace or clear 9075 * @flags: xdp-related flags 9076 * 9077 * Set or clear a bpf program for a device 9078 */ 9079 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9080 int fd, int expected_fd, u32 flags) 9081 { 9082 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9083 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9084 int err; 9085 9086 ASSERT_RTNL(); 9087 9088 if (fd >= 0) { 9089 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9090 mode != XDP_MODE_SKB); 9091 if (IS_ERR(new_prog)) 9092 return PTR_ERR(new_prog); 9093 } 9094 9095 if (expected_fd >= 0) { 9096 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9097 mode != XDP_MODE_SKB); 9098 if (IS_ERR(old_prog)) { 9099 err = PTR_ERR(old_prog); 9100 old_prog = NULL; 9101 goto err_out; 9102 } 9103 } 9104 9105 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9106 9107 err_out: 9108 if (err && new_prog) 9109 bpf_prog_put(new_prog); 9110 if (old_prog) 9111 bpf_prog_put(old_prog); 9112 return err; 9113 } 9114 9115 /** 9116 * dev_new_index - allocate an ifindex 9117 * @net: the applicable net namespace 9118 * 9119 * Returns a suitable unique value for a new device interface 9120 * number. The caller must hold the rtnl semaphore or the 9121 * dev_base_lock to be sure it remains unique. 
9122 */ 9123 static int dev_new_index(struct net *net) 9124 { 9125 int ifindex = net->ifindex; 9126 9127 for (;;) { 9128 if (++ifindex <= 0) 9129 ifindex = 1; 9130 if (!__dev_get_by_index(net, ifindex)) 9131 return net->ifindex = ifindex; 9132 } 9133 } 9134 9135 /* Delayed registration/unregisteration */ 9136 static LIST_HEAD(net_todo_list); 9137 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9138 9139 static void net_set_todo(struct net_device *dev) 9140 { 9141 list_add_tail(&dev->todo_list, &net_todo_list); 9142 atomic_inc(&dev_net(dev)->dev_unreg_count); 9143 } 9144 9145 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9146 struct net_device *upper, netdev_features_t features) 9147 { 9148 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9149 netdev_features_t feature; 9150 int feature_bit; 9151 9152 for_each_netdev_feature(upper_disables, feature_bit) { 9153 feature = __NETIF_F_BIT(feature_bit); 9154 if (!(upper->wanted_features & feature) 9155 && (features & feature)) { 9156 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9157 &feature, upper->name); 9158 features &= ~feature; 9159 } 9160 } 9161 9162 return features; 9163 } 9164 9165 static void netdev_sync_lower_features(struct net_device *upper, 9166 struct net_device *lower, netdev_features_t features) 9167 { 9168 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9169 netdev_features_t feature; 9170 int feature_bit; 9171 9172 for_each_netdev_feature(upper_disables, feature_bit) { 9173 feature = __NETIF_F_BIT(feature_bit); 9174 if (!(features & feature) && (lower->features & feature)) { 9175 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9176 &feature, lower->name); 9177 lower->wanted_features &= ~feature; 9178 __netdev_update_features(lower); 9179 9180 if (unlikely(lower->features & feature)) 9181 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9182 &feature, lower->name); 9183 else 9184 netdev_features_change(lower); 9185 } 9186 } 9187 } 9188 9189 static netdev_features_t netdev_fix_features(struct net_device *dev, 9190 netdev_features_t features) 9191 { 9192 /* Fix illegal checksum combinations */ 9193 if ((features & NETIF_F_HW_CSUM) && 9194 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9195 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9196 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9197 } 9198 9199 /* TSO requires that SG is present as well. */ 9200 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9201 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9202 features &= ~NETIF_F_ALL_TSO; 9203 } 9204 9205 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9206 !(features & NETIF_F_IP_CSUM)) { 9207 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9208 features &= ~NETIF_F_TSO; 9209 features &= ~NETIF_F_TSO_ECN; 9210 } 9211 9212 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9213 !(features & NETIF_F_IPV6_CSUM)) { 9214 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9215 features &= ~NETIF_F_TSO6; 9216 } 9217 9218 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9219 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9220 features &= ~NETIF_F_TSO_MANGLEID; 9221 9222 /* TSO ECN requires that TSO is present as well. */ 9223 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9224 features &= ~NETIF_F_TSO_ECN; 9225 9226 /* Software GSO depends on SG. 
*/ 9227 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9228 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9229 features &= ~NETIF_F_GSO; 9230 } 9231 9232 /* GSO partial features require GSO partial be set */ 9233 if ((features & dev->gso_partial_features) && 9234 !(features & NETIF_F_GSO_PARTIAL)) { 9235 netdev_dbg(dev, 9236 "Dropping partially supported GSO features since no GSO partial.\n"); 9237 features &= ~dev->gso_partial_features; 9238 } 9239 9240 if (!(features & NETIF_F_RXCSUM)) { 9241 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9242 * successfully merged by hardware must also have the 9243 * checksum verified by hardware. If the user does not 9244 * want to enable RXCSUM, logically, we should disable GRO_HW. 9245 */ 9246 if (features & NETIF_F_GRO_HW) { 9247 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9248 features &= ~NETIF_F_GRO_HW; 9249 } 9250 } 9251 9252 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9253 if (features & NETIF_F_RXFCS) { 9254 if (features & NETIF_F_LRO) { 9255 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9256 features &= ~NETIF_F_LRO; 9257 } 9258 9259 if (features & NETIF_F_GRO_HW) { 9260 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9261 features &= ~NETIF_F_GRO_HW; 9262 } 9263 } 9264 9265 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { 9266 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); 9267 features &= ~NETIF_F_LRO; 9268 } 9269 9270 if (features & NETIF_F_HW_TLS_TX) { 9271 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9272 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9273 bool hw_csum = features & NETIF_F_HW_CSUM; 9274 9275 if (!ip_csum && !hw_csum) { 9276 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9277 features &= ~NETIF_F_HW_TLS_TX; 9278 } 9279 } 9280 9281 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9282 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9283 features &= ~NETIF_F_HW_TLS_RX; 9284 } 9285 9286 return features; 9287 } 9288 9289 int __netdev_update_features(struct net_device *dev) 9290 { 9291 struct net_device *upper, *lower; 9292 netdev_features_t features; 9293 struct list_head *iter; 9294 int err = -1; 9295 9296 ASSERT_RTNL(); 9297 9298 features = netdev_get_wanted_features(dev); 9299 9300 if (dev->netdev_ops->ndo_fix_features) 9301 features = dev->netdev_ops->ndo_fix_features(dev, features); 9302 9303 /* driver might be less strict about feature dependencies */ 9304 features = netdev_fix_features(dev, features); 9305 9306 /* some features can't be enabled if they're off on an upper device */ 9307 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9308 features = netdev_sync_upper_features(dev, upper, features); 9309 9310 if (dev->features == features) 9311 goto sync_lower; 9312 9313 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9314 &dev->features, &features); 9315 9316 if (dev->netdev_ops->ndo_set_features) 9317 err = dev->netdev_ops->ndo_set_features(dev, features); 9318 else 9319 err = 0; 9320 9321 if (unlikely(err < 0)) { 9322 netdev_err(dev, 9323 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9324 err, &features, &dev->features); 9325 /* return non-0 since some features might have changed and 9326 * it's better to fire a spurious notification than miss it 9327 */ 9328 return -1; 9329 } 9330 9331 sync_lower: 9332 /* some features must be disabled 
on lower devices when disabled 9333 * on an upper device (think: bonding master or bridge) 9334 */ 9335 netdev_for_each_lower_dev(dev, lower, iter) 9336 netdev_sync_lower_features(dev, lower, features); 9337 9338 if (!err) { 9339 netdev_features_t diff = features ^ dev->features; 9340 9341 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9342 /* udp_tunnel_{get,drop}_rx_info both need 9343 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9344 * device, or they won't do anything. 9345 * Thus we need to update dev->features 9346 * *before* calling udp_tunnel_get_rx_info, 9347 * but *after* calling udp_tunnel_drop_rx_info. 9348 */ 9349 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9350 dev->features = features; 9351 udp_tunnel_get_rx_info(dev); 9352 } else { 9353 udp_tunnel_drop_rx_info(dev); 9354 } 9355 } 9356 9357 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9358 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9359 dev->features = features; 9360 err |= vlan_get_rx_ctag_filter_info(dev); 9361 } else { 9362 vlan_drop_rx_ctag_filter_info(dev); 9363 } 9364 } 9365 9366 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 9367 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 9368 dev->features = features; 9369 err |= vlan_get_rx_stag_filter_info(dev); 9370 } else { 9371 vlan_drop_rx_stag_filter_info(dev); 9372 } 9373 } 9374 9375 dev->features = features; 9376 } 9377 9378 return err < 0 ? 0 : 1; 9379 } 9380 9381 /** 9382 * netdev_update_features - recalculate device features 9383 * @dev: the device to check 9384 * 9385 * Recalculate dev->features set and send notifications if it 9386 * has changed. Should be called after driver or hardware dependent 9387 * conditions might have changed that influence the features. 9388 */ 9389 void netdev_update_features(struct net_device *dev) 9390 { 9391 if (__netdev_update_features(dev)) 9392 netdev_features_change(dev); 9393 } 9394 EXPORT_SYMBOL(netdev_update_features); 9395 9396 /** 9397 * netdev_change_features - recalculate device features 9398 * @dev: the device to check 9399 * 9400 * Recalculate dev->features set and send notifications even 9401 * if they have not changed. Should be called instead of 9402 * netdev_update_features() if also dev->vlan_features might 9403 * have changed to allow the changes to be propagated to stacked 9404 * VLAN devices. 9405 */ 9406 void netdev_change_features(struct net_device *dev) 9407 { 9408 __netdev_update_features(dev); 9409 netdev_features_change(dev); 9410 } 9411 EXPORT_SYMBOL(netdev_change_features); 9412 9413 /** 9414 * netif_stacked_transfer_operstate - transfer operstate 9415 * @rootdev: the root or lower level device to transfer state from 9416 * @dev: the device to transfer operstate to 9417 * 9418 * Transfer operational state from root to device. This is normally 9419 * called when a stacking relationship exists between the root 9420 * device and the device(a leaf device). 
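 *
 * A hypothetical sketch (not from this file): a stacking driver mirroring
 * its lower device's state from a NETDEV_CHANGE notifier, where "lowerdev"
 * and "stacked_dev" stand for the notifier's devices:
 *
 *	netif_stacked_transfer_operstate(lowerdev, stacked_dev);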
9421 */ 9422 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9423 struct net_device *dev) 9424 { 9425 if (rootdev->operstate == IF_OPER_DORMANT) 9426 netif_dormant_on(dev); 9427 else 9428 netif_dormant_off(dev); 9429 9430 if (rootdev->operstate == IF_OPER_TESTING) 9431 netif_testing_on(dev); 9432 else 9433 netif_testing_off(dev); 9434 9435 if (netif_carrier_ok(rootdev)) 9436 netif_carrier_on(dev); 9437 else 9438 netif_carrier_off(dev); 9439 } 9440 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9441 9442 static int netif_alloc_rx_queues(struct net_device *dev) 9443 { 9444 unsigned int i, count = dev->num_rx_queues; 9445 struct netdev_rx_queue *rx; 9446 size_t sz = count * sizeof(*rx); 9447 int err = 0; 9448 9449 BUG_ON(count < 1); 9450 9451 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9452 if (!rx) 9453 return -ENOMEM; 9454 9455 dev->_rx = rx; 9456 9457 for (i = 0; i < count; i++) { 9458 rx[i].dev = dev; 9459 9460 /* XDP RX-queue setup */ 9461 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 9462 if (err < 0) 9463 goto err_rxq_info; 9464 } 9465 return 0; 9466 9467 err_rxq_info: 9468 /* Rollback successful reg's and free other resources */ 9469 while (i--) 9470 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9471 kvfree(dev->_rx); 9472 dev->_rx = NULL; 9473 return err; 9474 } 9475 9476 static void netif_free_rx_queues(struct net_device *dev) 9477 { 9478 unsigned int i, count = dev->num_rx_queues; 9479 9480 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9481 if (!dev->_rx) 9482 return; 9483 9484 for (i = 0; i < count; i++) 9485 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9486 9487 kvfree(dev->_rx); 9488 } 9489 9490 static void netdev_init_one_queue(struct net_device *dev, 9491 struct netdev_queue *queue, void *_unused) 9492 { 9493 /* Initialize queue lock */ 9494 spin_lock_init(&queue->_xmit_lock); 9495 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 9496 queue->xmit_lock_owner = -1; 9497 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 9498 queue->dev = dev; 9499 #ifdef CONFIG_BQL 9500 dql_init(&queue->dql, HZ); 9501 #endif 9502 } 9503 9504 static void netif_free_tx_queues(struct net_device *dev) 9505 { 9506 kvfree(dev->_tx); 9507 } 9508 9509 static int netif_alloc_netdev_queues(struct net_device *dev) 9510 { 9511 unsigned int count = dev->num_tx_queues; 9512 struct netdev_queue *tx; 9513 size_t sz = count * sizeof(*tx); 9514 9515 if (count < 1 || count > 0xffff) 9516 return -EINVAL; 9517 9518 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 9519 if (!tx) 9520 return -ENOMEM; 9521 9522 dev->_tx = tx; 9523 9524 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 9525 spin_lock_init(&dev->tx_global_lock); 9526 9527 return 0; 9528 } 9529 9530 void netif_tx_stop_all_queues(struct net_device *dev) 9531 { 9532 unsigned int i; 9533 9534 for (i = 0; i < dev->num_tx_queues; i++) { 9535 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 9536 9537 netif_tx_stop_queue(txq); 9538 } 9539 } 9540 EXPORT_SYMBOL(netif_tx_stop_all_queues); 9541 9542 /** 9543 * register_netdevice - register a network device 9544 * @dev: device to register 9545 * 9546 * Take a completed network device structure and add it to the kernel 9547 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 9548 * chain. 0 is returned on success. A negative errno code is returned 9549 * on a failure to set up the device, or if the name is a duplicate. 9550 * 9551 * Callers must hold the rtnl semaphore. 
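 * A minimal, hypothetical sketch of direct use (the caller's own error
 * unwinding is omitted):
 *
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();
 *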
You may want 9552 * register_netdev() instead of this. 9553 * 9554 * BUGS: 9555 * The locking appears insufficient to guarantee two parallel registers 9556 * will not get the same name. 9557 */ 9558 9559 int register_netdevice(struct net_device *dev) 9560 { 9561 int ret; 9562 struct net *net = dev_net(dev); 9563 9564 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 9565 NETDEV_FEATURE_COUNT); 9566 BUG_ON(dev_boot_phase); 9567 ASSERT_RTNL(); 9568 9569 might_sleep(); 9570 9571 /* When net_device's are persistent, this will be fatal. */ 9572 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 9573 BUG_ON(!net); 9574 9575 ret = ethtool_check_ops(dev->ethtool_ops); 9576 if (ret) 9577 return ret; 9578 9579 spin_lock_init(&dev->addr_list_lock); 9580 netdev_set_addr_lockdep_class(dev); 9581 9582 ret = dev_get_valid_name(net, dev, dev->name); 9583 if (ret < 0) 9584 goto out; 9585 9586 ret = -ENOMEM; 9587 dev->name_node = netdev_name_node_head_alloc(dev); 9588 if (!dev->name_node) 9589 goto out; 9590 9591 /* Init, if this function is available */ 9592 if (dev->netdev_ops->ndo_init) { 9593 ret = dev->netdev_ops->ndo_init(dev); 9594 if (ret) { 9595 if (ret > 0) 9596 ret = -EIO; 9597 goto err_free_name; 9598 } 9599 } 9600 9601 if (((dev->hw_features | dev->features) & 9602 NETIF_F_HW_VLAN_CTAG_FILTER) && 9603 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 9604 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 9605 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 9606 ret = -EINVAL; 9607 goto err_uninit; 9608 } 9609 9610 ret = -EBUSY; 9611 if (!dev->ifindex) 9612 dev->ifindex = dev_new_index(net); 9613 else if (__dev_get_by_index(net, dev->ifindex)) 9614 goto err_uninit; 9615 9616 /* Transfer changeable features to wanted_features and enable 9617 * software offloads (GSO and GRO). 9618 */ 9619 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 9620 dev->features |= NETIF_F_SOFT_FEATURES; 9621 9622 if (dev->udp_tunnel_nic_info) { 9623 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 9624 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 9625 } 9626 9627 dev->wanted_features = dev->features & dev->hw_features; 9628 9629 if (!(dev->flags & IFF_LOOPBACK)) 9630 dev->hw_features |= NETIF_F_NOCACHE_COPY; 9631 9632 /* If IPv4 TCP segmentation offload is supported we should also 9633 * allow the device to enable segmenting the frame with the option 9634 * of ignoring a static IP ID value. This doesn't enable the 9635 * feature itself but allows the user to enable it later. 9636 */ 9637 if (dev->hw_features & NETIF_F_TSO) 9638 dev->hw_features |= NETIF_F_TSO_MANGLEID; 9639 if (dev->vlan_features & NETIF_F_TSO) 9640 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 9641 if (dev->mpls_features & NETIF_F_TSO) 9642 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 9643 if (dev->hw_enc_features & NETIF_F_TSO) 9644 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 9645 9646 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 9647 */ 9648 dev->vlan_features |= NETIF_F_HIGHDMA; 9649 9650 /* Make NETIF_F_SG inheritable to tunnel devices. 9651 */ 9652 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 9653 9654 /* Make NETIF_F_SG inheritable to MPLS. 
9655 */ 9656 dev->mpls_features |= NETIF_F_SG; 9657 9658 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 9659 ret = notifier_to_errno(ret); 9660 if (ret) 9661 goto err_uninit; 9662 9663 ret = netdev_register_kobject(dev); 9664 if (ret) { 9665 dev->reg_state = NETREG_UNREGISTERED; 9666 goto err_uninit; 9667 } 9668 dev->reg_state = NETREG_REGISTERED; 9669 9670 __netdev_update_features(dev); 9671 9672 /* 9673 * Default initial state at registry is that the 9674 * device is present. 9675 */ 9676 9677 set_bit(__LINK_STATE_PRESENT, &dev->state); 9678 9679 linkwatch_init_dev(dev); 9680 9681 dev_init_scheduler(dev); 9682 9683 dev_hold_track(dev, &dev->dev_registered_tracker, GFP_KERNEL); 9684 list_netdevice(dev); 9685 9686 add_device_randomness(dev->dev_addr, dev->addr_len); 9687 9688 /* If the device has a permanent device address, the driver should 9689 * set dev_addr and also set addr_assign_type to 9690 * NET_ADDR_PERM (the default value). 9691 */ 9692 if (dev->addr_assign_type == NET_ADDR_PERM) 9693 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 9694 9695 /* Notify protocols that a new device appeared. */ 9696 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 9697 ret = notifier_to_errno(ret); 9698 if (ret) { 9699 /* Expect explicit free_netdev() on failure */ 9700 dev->needs_free_netdev = false; 9701 unregister_netdevice_queue(dev, NULL); 9702 goto out; 9703 } 9704 /* 9705 * Prevent userspace races by waiting until the network 9706 * device is fully setup before sending notifications. 9707 */ 9708 if (!dev->rtnl_link_ops || 9709 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 9710 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 9711 9712 out: 9713 return ret; 9714 9715 err_uninit: 9716 if (dev->netdev_ops->ndo_uninit) 9717 dev->netdev_ops->ndo_uninit(dev); 9718 if (dev->priv_destructor) 9719 dev->priv_destructor(dev); 9720 err_free_name: 9721 netdev_name_node_free(dev->name_node); 9722 goto out; 9723 } 9724 EXPORT_SYMBOL(register_netdevice); 9725 9726 /** 9727 * init_dummy_netdev - init a dummy network device for NAPI 9728 * @dev: device to init 9729 * 9730 * This takes a network device structure and initializes the minimum 9731 * number of fields so it can be used to schedule NAPI polls without 9732 * registering a full blown interface. This is to be used by drivers 9733 * that need to tie several hardware interfaces to a single NAPI 9734 * poll scheduler due to HW limitations. 9735 */ 9736 int init_dummy_netdev(struct net_device *dev) 9737 { 9738 /* Clear everything. Note we don't initialize spinlocks 9739 * as they aren't supposed to be taken by any of the 9740 * NAPI code and this dummy netdev is supposed to be 9741 * only ever used for NAPI polls. 9742 */ 9743 memset(dev, 0, sizeof(struct net_device)); 9744 9745 /* make sure we BUG if trying to hit standard 9746 * register/unregister code path 9747 */ 9748 dev->reg_state = NETREG_DUMMY; 9749 9750 /* NAPI wants this */ 9751 INIT_LIST_HEAD(&dev->napi_list); 9752 9753 /* a dummy interface is started by default */ 9754 set_bit(__LINK_STATE_PRESENT, &dev->state); 9755 set_bit(__LINK_STATE_START, &dev->state); 9756 9757 /* napi_busy_loop stats accounting wants this */ 9758 dev_net_set(dev, &init_net); 9759 9760 /* Note : We don't allocate pcpu_refcnt for dummy devices, 9761 * because users of this 'device' don't need to change 9762 * its refcount.
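 *
 * A hypothetical usage sketch (not from this file): a driver tying two
 * hardware channels to a single NAPI scheduler through an embedded dummy
 * netdev; "priv", "napi" and "my_poll" are made-up driver names:
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);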
9763 */ 9764 9765 return 0; 9766 } 9767 EXPORT_SYMBOL_GPL(init_dummy_netdev); 9768 9769 9770 /** 9771 * register_netdev - register a network device 9772 * @dev: device to register 9773 * 9774 * Take a completed network device structure and add it to the kernel 9775 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 9776 * chain. 0 is returned on success. A negative errno code is returned 9777 * on a failure to set up the device, or if the name is a duplicate. 9778 * 9779 * This is a wrapper around register_netdevice that takes the rtnl semaphore 9780 * and expands the device name if you passed a format string to 9781 * alloc_netdev. 9782 */ 9783 int register_netdev(struct net_device *dev) 9784 { 9785 int err; 9786 9787 if (rtnl_lock_killable()) 9788 return -EINTR; 9789 err = register_netdevice(dev); 9790 rtnl_unlock(); 9791 return err; 9792 } 9793 EXPORT_SYMBOL(register_netdev); 9794 9795 int netdev_refcnt_read(const struct net_device *dev) 9796 { 9797 #ifdef CONFIG_PCPU_DEV_REFCNT 9798 int i, refcnt = 0; 9799 9800 for_each_possible_cpu(i) 9801 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 9802 return refcnt; 9803 #else 9804 return refcount_read(&dev->dev_refcnt); 9805 #endif 9806 } 9807 EXPORT_SYMBOL(netdev_refcnt_read); 9808 9809 int netdev_unregister_timeout_secs __read_mostly = 10; 9810 9811 #define WAIT_REFS_MIN_MSECS 1 9812 #define WAIT_REFS_MAX_MSECS 250 9813 /** 9814 * netdev_wait_allrefs - wait until all references are gone. 9815 * @dev: target net_device 9816 * 9817 * This is called when unregistering network devices. 9818 * 9819 * Any protocol or device that holds a reference should register 9820 * for netdevice notification, and cleanup and put back the 9821 * reference if they receive an UNREGISTER event. 9822 * We can get stuck here if buggy protocols don't correctly 9823 * call dev_put. 9824 */ 9825 static void netdev_wait_allrefs(struct net_device *dev) 9826 { 9827 unsigned long rebroadcast_time, warning_time; 9828 int wait = 0, refcnt; 9829 9830 linkwatch_forget_dev(dev); 9831 9832 rebroadcast_time = warning_time = jiffies; 9833 refcnt = netdev_refcnt_read(dev); 9834 9835 while (refcnt != 1) { 9836 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 9837 rtnl_lock(); 9838 9839 /* Rebroadcast unregister notification */ 9840 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 9841 9842 __rtnl_unlock(); 9843 rcu_barrier(); 9844 rtnl_lock(); 9845 9846 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 9847 &dev->state)) { 9848 /* We must not have linkwatch events 9849 * pending on unregister. If this 9850 * happens, we simply run the queue 9851 * unscheduled, resulting in a noop 9852 * for this device. 9853 */ 9854 linkwatch_run_queue(); 9855 } 9856 9857 __rtnl_unlock(); 9858 9859 rebroadcast_time = jiffies; 9860 } 9861 9862 if (!wait) { 9863 rcu_barrier(); 9864 wait = WAIT_REFS_MIN_MSECS; 9865 } else { 9866 msleep(wait); 9867 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 9868 } 9869 9870 refcnt = netdev_refcnt_read(dev); 9871 9872 if (refcnt != 1 && 9873 time_after(jiffies, warning_time + 9874 netdev_unregister_timeout_secs * HZ)) { 9875 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 9876 dev->name, refcnt); 9877 ref_tracker_dir_print(&dev->refcnt_tracker, 10); 9878 warning_time = jiffies; 9879 } 9880 } 9881 } 9882 9883 /* The sequence is: 9884 * 9885 * rtnl_lock(); 9886 * ... 9887 * register_netdevice(x1); 9888 * register_netdevice(x2); 9889 * ... 9890 * unregister_netdevice(y1); 9891 * unregister_netdevice(y2); 9892 * ... 
9893 * rtnl_unlock(); 9894 * free_netdev(y1); 9895 * free_netdev(y2); 9896 * 9897 * We are invoked by rtnl_unlock(). 9898 * This allows us to deal with problems: 9899 * 1) We can delete sysfs objects which invoke hotplug 9900 * without deadlocking with linkwatch via keventd. 9901 * 2) Since we run with the RTNL semaphore not held, we can sleep 9902 * safely in order to wait for the netdev refcnt to drop to zero. 9903 * 9904 * We must not return until all unregister events added during 9905 * the interval the lock was held have been completed. 9906 */ 9907 void netdev_run_todo(void) 9908 { 9909 struct list_head list; 9910 #ifdef CONFIG_LOCKDEP 9911 struct list_head unlink_list; 9912 9913 list_replace_init(&net_unlink_list, &unlink_list); 9914 9915 while (!list_empty(&unlink_list)) { 9916 struct net_device *dev = list_first_entry(&unlink_list, 9917 struct net_device, 9918 unlink_list); 9919 list_del_init(&dev->unlink_list); 9920 dev->nested_level = dev->lower_level - 1; 9921 } 9922 #endif 9923 9924 /* Snapshot list, allow later requests */ 9925 list_replace_init(&net_todo_list, &list); 9926 9927 __rtnl_unlock(); 9928 9929 9930 /* Wait for rcu callbacks to finish before next phase */ 9931 if (!list_empty(&list)) 9932 rcu_barrier(); 9933 9934 while (!list_empty(&list)) { 9935 struct net_device *dev 9936 = list_first_entry(&list, struct net_device, todo_list); 9937 list_del(&dev->todo_list); 9938 9939 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 9940 pr_err("network todo '%s' but state %d\n", 9941 dev->name, dev->reg_state); 9942 dump_stack(); 9943 continue; 9944 } 9945 9946 dev->reg_state = NETREG_UNREGISTERED; 9947 9948 netdev_wait_allrefs(dev); 9949 9950 /* paranoia */ 9951 BUG_ON(netdev_refcnt_read(dev) != 1); 9952 BUG_ON(!list_empty(&dev->ptype_all)); 9953 BUG_ON(!list_empty(&dev->ptype_specific)); 9954 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 9955 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 9956 #if IS_ENABLED(CONFIG_DECNET) 9957 WARN_ON(dev->dn_ptr); 9958 #endif 9959 if (dev->priv_destructor) 9960 dev->priv_destructor(dev); 9961 if (dev->needs_free_netdev) 9962 free_netdev(dev); 9963 9964 if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count)) 9965 wake_up(&netdev_unregistering_wq); 9966 9967 /* Free network device */ 9968 kobject_put(&dev->dev.kobj); 9969 } 9970 } 9971 9972 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 9973 * all the same fields in the same order as net_device_stats, with only 9974 * the type differing, but rtnl_link_stats64 may have additional fields 9975 * at the end for newer counters. 
9976 */ 9977 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 9978 const struct net_device_stats *netdev_stats) 9979 { 9980 #if BITS_PER_LONG == 64 9981 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 9982 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 9983 /* zero out counters that only exist in rtnl_link_stats64 */ 9984 memset((char *)stats64 + sizeof(*netdev_stats), 0, 9985 sizeof(*stats64) - sizeof(*netdev_stats)); 9986 #else 9987 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 9988 const unsigned long *src = (const unsigned long *)netdev_stats; 9989 u64 *dst = (u64 *)stats64; 9990 9991 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 9992 for (i = 0; i < n; i++) 9993 dst[i] = src[i]; 9994 /* zero out counters that only exist in rtnl_link_stats64 */ 9995 memset((char *)stats64 + n * sizeof(u64), 0, 9996 sizeof(*stats64) - n * sizeof(u64)); 9997 #endif 9998 } 9999 EXPORT_SYMBOL(netdev_stats_to_stats64); 10000 10001 /** 10002 * dev_get_stats - get network device statistics 10003 * @dev: device to get statistics from 10004 * @storage: place to store stats 10005 * 10006 * Get network statistics from device. Return @storage. 10007 * The device driver may provide its own method by setting 10008 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10009 * otherwise the internal statistics structure is used. 10010 */ 10011 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10012 struct rtnl_link_stats64 *storage) 10013 { 10014 const struct net_device_ops *ops = dev->netdev_ops; 10015 10016 if (ops->ndo_get_stats64) { 10017 memset(storage, 0, sizeof(*storage)); 10018 ops->ndo_get_stats64(dev, storage); 10019 } else if (ops->ndo_get_stats) { 10020 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10021 } else { 10022 netdev_stats_to_stats64(storage, &dev->stats); 10023 } 10024 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 10025 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 10026 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 10027 return storage; 10028 } 10029 EXPORT_SYMBOL(dev_get_stats); 10030 10031 /** 10032 * dev_fetch_sw_netstats - get per-cpu network device statistics 10033 * @s: place to store stats 10034 * @netstats: per-cpu network stats to read from 10035 * 10036 * Read per-cpu network statistics and populate the related fields in @s. 10037 */ 10038 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10039 const struct pcpu_sw_netstats __percpu *netstats) 10040 { 10041 int cpu; 10042 10043 for_each_possible_cpu(cpu) { 10044 const struct pcpu_sw_netstats *stats; 10045 struct pcpu_sw_netstats tmp; 10046 unsigned int start; 10047 10048 stats = per_cpu_ptr(netstats, cpu); 10049 do { 10050 start = u64_stats_fetch_begin_irq(&stats->syncp); 10051 tmp.rx_packets = stats->rx_packets; 10052 tmp.rx_bytes = stats->rx_bytes; 10053 tmp.tx_packets = stats->tx_packets; 10054 tmp.tx_bytes = stats->tx_bytes; 10055 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 10056 10057 s->rx_packets += tmp.rx_packets; 10058 s->rx_bytes += tmp.rx_bytes; 10059 s->tx_packets += tmp.tx_packets; 10060 s->tx_bytes += tmp.tx_bytes; 10061 } 10062 } 10063 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10064 10065 /** 10066 * dev_get_tstats64 - ndo_get_stats64 implementation 10067 * @dev: device to get statistics from 10068 * @s: place to store stats 10069 * 10070 * Populate @s from dev->stats and dev->tstats. 
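 * A hypothetical sketch of wiring it up in a driver that allocated
 * dev->tstats itself (e.g. with netdev_alloc_pcpu_stats()); "foo" is a
 * made-up driver name:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_get_stats64	= dev_get_tstats64,
 *	};
 *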
Can be used as 10071 * ndo_get_stats64() callback. 10072 */ 10073 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10074 { 10075 netdev_stats_to_stats64(s, &dev->stats); 10076 dev_fetch_sw_netstats(s, dev->tstats); 10077 } 10078 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10079 10080 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10081 { 10082 struct netdev_queue *queue = dev_ingress_queue(dev); 10083 10084 #ifdef CONFIG_NET_CLS_ACT 10085 if (queue) 10086 return queue; 10087 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10088 if (!queue) 10089 return NULL; 10090 netdev_init_one_queue(dev, queue, NULL); 10091 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10092 queue->qdisc_sleeping = &noop_qdisc; 10093 rcu_assign_pointer(dev->ingress_queue, queue); 10094 #endif 10095 return queue; 10096 } 10097 10098 static const struct ethtool_ops default_ethtool_ops; 10099 10100 void netdev_set_default_ethtool_ops(struct net_device *dev, 10101 const struct ethtool_ops *ops) 10102 { 10103 if (dev->ethtool_ops == &default_ethtool_ops) 10104 dev->ethtool_ops = ops; 10105 } 10106 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10107 10108 void netdev_freemem(struct net_device *dev) 10109 { 10110 char *addr = (char *)dev - dev->padded; 10111 10112 kvfree(addr); 10113 } 10114 10115 /** 10116 * alloc_netdev_mqs - allocate network device 10117 * @sizeof_priv: size of private data to allocate space for 10118 * @name: device name format string 10119 * @name_assign_type: origin of device name 10120 * @setup: callback to initialize device 10121 * @txqs: the number of TX subqueues to allocate 10122 * @rxqs: the number of RX subqueues to allocate 10123 * 10124 * Allocates a struct net_device with private data area for driver use 10125 * and performs basic initialization. Also allocates subqueue structs 10126 * for each queue on the device. 
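 *
 * A hypothetical sketch ("foo" names are made up): an Ethernet driver
 * asking for four TX and four RX queues using the ether_setup() helper:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;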
10127 */ 10128 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10129 unsigned char name_assign_type, 10130 void (*setup)(struct net_device *), 10131 unsigned int txqs, unsigned int rxqs) 10132 { 10133 struct net_device *dev; 10134 unsigned int alloc_size; 10135 struct net_device *p; 10136 10137 BUG_ON(strlen(name) >= sizeof(dev->name)); 10138 10139 if (txqs < 1) { 10140 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10141 return NULL; 10142 } 10143 10144 if (rxqs < 1) { 10145 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10146 return NULL; 10147 } 10148 10149 alloc_size = sizeof(struct net_device); 10150 if (sizeof_priv) { 10151 /* ensure 32-byte alignment of private area */ 10152 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10153 alloc_size += sizeof_priv; 10154 } 10155 /* ensure 32-byte alignment of whole construct */ 10156 alloc_size += NETDEV_ALIGN - 1; 10157 10158 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10159 if (!p) 10160 return NULL; 10161 10162 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10163 dev->padded = (char *)dev - (char *)p; 10164 10165 ref_tracker_dir_init(&dev->refcnt_tracker, 128); 10166 #ifdef CONFIG_PCPU_DEV_REFCNT 10167 dev->pcpu_refcnt = alloc_percpu(int); 10168 if (!dev->pcpu_refcnt) 10169 goto free_dev; 10170 __dev_hold(dev); 10171 #else 10172 refcount_set(&dev->dev_refcnt, 1); 10173 #endif 10174 10175 if (dev_addr_init(dev)) 10176 goto free_pcpu; 10177 10178 dev_mc_init(dev); 10179 dev_uc_init(dev); 10180 10181 dev_net_set(dev, &init_net); 10182 10183 dev->gso_max_size = GSO_MAX_SIZE; 10184 dev->gso_max_segs = GSO_MAX_SEGS; 10185 dev->gro_max_size = GRO_MAX_SIZE; 10186 dev->upper_level = 1; 10187 dev->lower_level = 1; 10188 #ifdef CONFIG_LOCKDEP 10189 dev->nested_level = 0; 10190 INIT_LIST_HEAD(&dev->unlink_list); 10191 #endif 10192 10193 INIT_LIST_HEAD(&dev->napi_list); 10194 INIT_LIST_HEAD(&dev->unreg_list); 10195 INIT_LIST_HEAD(&dev->close_list); 10196 INIT_LIST_HEAD(&dev->link_watch_list); 10197 INIT_LIST_HEAD(&dev->adj_list.upper); 10198 INIT_LIST_HEAD(&dev->adj_list.lower); 10199 INIT_LIST_HEAD(&dev->ptype_all); 10200 INIT_LIST_HEAD(&dev->ptype_specific); 10201 INIT_LIST_HEAD(&dev->net_notifier_list); 10202 #ifdef CONFIG_NET_SCHED 10203 hash_init(dev->qdisc_hash); 10204 #endif 10205 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10206 setup(dev); 10207 10208 if (!dev->tx_queue_len) { 10209 dev->priv_flags |= IFF_NO_QUEUE; 10210 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10211 } 10212 10213 dev->num_tx_queues = txqs; 10214 dev->real_num_tx_queues = txqs; 10215 if (netif_alloc_netdev_queues(dev)) 10216 goto free_all; 10217 10218 dev->num_rx_queues = rxqs; 10219 dev->real_num_rx_queues = rxqs; 10220 if (netif_alloc_rx_queues(dev)) 10221 goto free_all; 10222 10223 strcpy(dev->name, name); 10224 dev->name_assign_type = name_assign_type; 10225 dev->group = INIT_NETDEV_GROUP; 10226 if (!dev->ethtool_ops) 10227 dev->ethtool_ops = &default_ethtool_ops; 10228 10229 nf_hook_netdev_init(dev); 10230 10231 return dev; 10232 10233 free_all: 10234 free_netdev(dev); 10235 return NULL; 10236 10237 free_pcpu: 10238 #ifdef CONFIG_PCPU_DEV_REFCNT 10239 free_percpu(dev->pcpu_refcnt); 10240 free_dev: 10241 #endif 10242 netdev_freemem(dev); 10243 return NULL; 10244 } 10245 EXPORT_SYMBOL(alloc_netdev_mqs); 10246 10247 /** 10248 * free_netdev - free network device 10249 * @dev: device 10250 * 10251 * This function does the last stage of destroying an allocated device 10252 * 
interface. The reference to the device object is released. If this
10253 * is the last reference then it will be freed. Must be called in process
10254 * context.
10255 */
10256 void free_netdev(struct net_device *dev)
10257 {
10258 struct napi_struct *p, *n;
10259 
10260 might_sleep();
10261 
10262 /* When called immediately after register_netdevice() failed the unwind
10263 * handling may still be dismantling the device. Handle that case by
10264 * deferring the free.
10265 */
10266 if (dev->reg_state == NETREG_UNREGISTERING) {
10267 ASSERT_RTNL();
10268 dev->needs_free_netdev = true;
10269 return;
10270 }
10271 
10272 netif_free_tx_queues(dev);
10273 netif_free_rx_queues(dev);
10274 
10275 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10276 
10277 /* Flush device addresses */
10278 dev_addr_flush(dev);
10279 
10280 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10281 netif_napi_del(p);
10282 
10283 ref_tracker_dir_exit(&dev->refcnt_tracker);
10284 #ifdef CONFIG_PCPU_DEV_REFCNT
10285 free_percpu(dev->pcpu_refcnt);
10286 dev->pcpu_refcnt = NULL;
10287 #endif
10288 free_percpu(dev->xdp_bulkq);
10289 dev->xdp_bulkq = NULL;
10290 
10291 /* Compatibility with error handling in drivers */
10292 if (dev->reg_state == NETREG_UNINITIALIZED) {
10293 netdev_freemem(dev);
10294 return;
10295 }
10296 
10297 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10298 dev->reg_state = NETREG_RELEASED;
10299 
10300 /* will free via device release */
10301 put_device(&dev->dev);
10302 }
10303 EXPORT_SYMBOL(free_netdev);
10304 
10305 /**
10306 * synchronize_net - Synchronize with packet receive processing
10307 *
10308 * Wait for packets currently being received to be done.
10309 * Does not block later packets from starting.
10310 */
10311 void synchronize_net(void)
10312 {
10313 might_sleep();
10314 if (rtnl_is_locked())
10315 synchronize_rcu_expedited();
10316 else
10317 synchronize_rcu();
10318 }
10319 EXPORT_SYMBOL(synchronize_net);
10320 
10321 /**
10322 * unregister_netdevice_queue - remove device from the kernel
10323 * @dev: device
10324 * @head: list
10325 *
10326 * This function shuts down a device interface and removes it
10327 * from the kernel tables.
10328 * If @head is not NULL, the device is queued to be unregistered later.
10329 *
10330 * Callers must hold the rtnl semaphore. You may want
10331 * unregister_netdev() instead of this.
10332 */
10333 
10334 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10335 {
10336 ASSERT_RTNL();
10337 
10338 if (head) {
10339 list_move_tail(&dev->unreg_list, head);
10340 } else {
10341 LIST_HEAD(single);
10342 
10343 list_add(&dev->unreg_list, &single);
10344 unregister_netdevice_many(&single);
10345 }
10346 }
10347 EXPORT_SYMBOL(unregister_netdevice_queue);
10348 
10349 /**
10350 * unregister_netdevice_many - unregister many devices
10351 * @head: list of devices
10352 *
10353 * Note: As most callers use a stack-allocated list_head,
10354 * we force a list_del() to make sure the stack won't be corrupted later.
10355 */
10356 void unregister_netdevice_many(struct list_head *head)
10357 {
10358 struct net_device *dev, *tmp;
10359 LIST_HEAD(close_head);
10360 
10361 BUG_ON(dev_boot_phase);
10362 ASSERT_RTNL();
10363 
10364 if (list_empty(head))
10365 return;
10366 
10367 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10368 /* Some devices call without registering
10369 * for initialization unwind. Remove those
10370 * devices and proceed with the remaining.
10371 */
10372 if (dev->reg_state == NETREG_UNINITIALIZED) {
10373 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10374 dev->name, dev);
10375 
10376 WARN_ON(1);
10377 list_del(&dev->unreg_list);
10378 continue;
10379 }
10380 dev->dismantle = true;
10381 BUG_ON(dev->reg_state != NETREG_REGISTERED);
10382 }
10383 
10384 /* If device is running, close it first. */
10385 list_for_each_entry(dev, head, unreg_list)
10386 list_add_tail(&dev->close_list, &close_head);
10387 dev_close_many(&close_head, true);
10388 
10389 list_for_each_entry(dev, head, unreg_list) {
10390 /* And unlink it from device chain. */
10391 unlist_netdevice(dev);
10392 
10393 dev->reg_state = NETREG_UNREGISTERING;
10394 }
10395 flush_all_backlogs();
10396 
10397 synchronize_net();
10398 
10399 list_for_each_entry(dev, head, unreg_list) {
10400 struct sk_buff *skb = NULL;
10401 
10402 /* Shutdown queueing discipline. */
10403 dev_shutdown(dev);
10404 
10405 dev_xdp_uninstall(dev);
10406 
10407 /* Notify protocols that we are about to destroy
10408 * this device. They should clean all the things.
10409 */
10410 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10411 
10412 if (!dev->rtnl_link_ops ||
10413 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10414 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10415 GFP_KERNEL, NULL, 0);
10416 
10417 /*
10418 * Flush the unicast and multicast chains
10419 */
10420 dev_uc_flush(dev);
10421 dev_mc_flush(dev);
10422 
10423 netdev_name_node_alt_flush(dev);
10424 netdev_name_node_free(dev->name_node);
10425 
10426 if (dev->netdev_ops->ndo_uninit)
10427 dev->netdev_ops->ndo_uninit(dev);
10428 
10429 if (skb)
10430 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
10431 
10432 /* The notifier chain MUST have detached us from all upper/lower devices. */
10433 WARN_ON(netdev_has_any_upper_dev(dev));
10434 WARN_ON(netdev_has_any_lower_dev(dev));
10435 
10436 /* Remove entries from kobject tree */
10437 netdev_unregister_kobject(dev);
10438 #ifdef CONFIG_XPS
10439 /* Remove XPS queueing entries */
10440 netif_reset_xps_queues_gt(dev, 0);
10441 #endif
10442 }
10443 
10444 synchronize_net();
10445 
10446 list_for_each_entry(dev, head, unreg_list) {
10447 dev_put_track(dev, &dev->dev_registered_tracker);
10448 net_set_todo(dev);
10449 }
10450 
10451 list_del(head);
10452 }
10453 EXPORT_SYMBOL(unregister_netdevice_many);
10454 
10455 /**
10456 * unregister_netdev - remove device from the kernel
10457 * @dev: device
10458 *
10459 * This function shuts down a device interface and removes it
10460 * from the kernel tables.
10461 *
10462 * This is just a wrapper for unregister_netdevice() that takes
10463 * the rtnl semaphore. In general you want to use this and not
10464 * unregister_netdevice().
10465 */
10466 void unregister_netdev(struct net_device *dev)
10467 {
10468 rtnl_lock();
10469 unregister_netdevice(dev);
10470 rtnl_unlock();
10471 }
10472 EXPORT_SYMBOL(unregister_netdev);
10473 
10474 /**
10475 * __dev_change_net_namespace - move device to a different network namespace
10476 * @dev: device
10477 * @net: network namespace
10478 * @pat: If not NULL, name pattern to try if the current device name
10479 * is already taken in the destination network namespace.
10480 * @new_ifindex: If not zero, specifies device index in the target
10481 * namespace.
10482 *
10483 * This function shuts down a device interface and moves it
10484 * to a new network namespace. On success 0 is returned, on
10485 * a failure a negative errno code is returned.
10486 *
10487 * Callers must hold the rtnl semaphore.
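 *
 * Editorial sketch (not part of the original kernel-doc): a typical caller
 * already holds RTNL and supplies a fallback name pattern, e.g.:
 *
 *	err = __dev_change_net_namespace(dev, net, "eth%d", 0);
 *
 * Passing 0 as @new_ifindex keeps the current ifindex when it is still free
 * in the target namespace and allocates a new one only on conflict.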
10488 */
10489 
10490 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
10491 const char *pat, int new_ifindex)
10492 {
10493 struct net *net_old = dev_net(dev);
10494 int err, new_nsid;
10495 
10496 ASSERT_RTNL();
10497 
10498 /* Don't allow namespace-local devices to be moved. */
10499 err = -EINVAL;
10500 if (dev->features & NETIF_F_NETNS_LOCAL)
10501 goto out;
10502 
10503 /* Ensure the device has been registered */
10504 if (dev->reg_state != NETREG_REGISTERED)
10505 goto out;
10506 
10507 /* Get out if there is nothing to do */
10508 err = 0;
10509 if (net_eq(net_old, net))
10510 goto out;
10511 
10512 /* Pick the destination device name, and ensure
10513 * we can use it in the destination network namespace.
10514 */
10515 err = -EEXIST;
10516 if (netdev_name_in_use(net, dev->name)) {
10517 /* We get here if we can't use the current device name */
10518 if (!pat)
10519 goto out;
10520 err = dev_get_valid_name(net, dev, pat);
10521 if (err < 0)
10522 goto out;
10523 }
10524 
10525 /* Check that new_ifindex isn't used yet. */
10526 err = -EBUSY;
10527 if (new_ifindex && __dev_get_by_index(net, new_ifindex))
10528 goto out;
10529 
10530 /*
10531 * And now a mini version of register_netdevice() and unregister_netdevice().
10532 */
10533 
10534 /* If the device is running, close it first. */
10535 dev_close(dev);
10536 
10537 /* And unlink it from device chain */
10538 unlist_netdevice(dev);
10539 
10540 synchronize_net();
10541 
10542 /* Shutdown queueing discipline. */
10543 dev_shutdown(dev);
10544 
10545 /* Notify protocols that we are about to destroy
10546 * this device. They should clean all the things.
10547 *
10548 * Note that dev->reg_state stays at NETREG_REGISTERED.
10549 * This is intentional, so that 8021q and macvlan know
10550 * the device is just moving and can keep their slaves up.
10551 */
10552 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10553 rcu_barrier();
10554 
10555 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
10556 /* If there is an ifindex conflict, assign a new one */
10557 if (!new_ifindex) {
10558 if (__dev_get_by_index(net, dev->ifindex))
10559 new_ifindex = dev_new_index(net);
10560 else
10561 new_ifindex = dev->ifindex;
10562 }
10563 
10564 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
10565 new_ifindex);
10566 
10567 /*
10568 * Flush the unicast and multicast chains
10569 */
10570 dev_uc_flush(dev);
10571 dev_mc_flush(dev);
10572 
10573 /* Send a netdev-removed uevent to the old namespace */
10574 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
10575 netdev_adjacent_del_links(dev);
10576 
10577 /* Move per-net netdevice notifiers that are following the netdevice */
10578 move_netdevice_notifiers_dev_net(dev, net);
10579 
10580 /* Actually switch the network namespace */
10581 dev_net_set(dev, net);
10582 dev->ifindex = new_ifindex;
10583 
10584 /* Send a netdev-add uevent to the new namespace */
10585 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
10586 netdev_adjacent_add_links(dev);
10587 
10588 /* Fixup kobjects */
10589 err = device_rename(&dev->dev, dev->name);
10590 WARN_ON(err);
10591 
10592 /* Adapt owner in case owning user namespace of target network
10593 * namespace is different from the original one.
10594 */
10595 err = netdev_change_owner(dev, net_old, net);
10596 WARN_ON(err);
10597 
10598 /* Add the device back in the hashes */
10599 list_netdevice(dev);
10600 
10601 /* Notify protocols that a new device appeared.
*/ 10602 call_netdevice_notifiers(NETDEV_REGISTER, dev); 10603 10604 /* 10605 * Prevent userspace races by waiting until the network 10606 * device is fully setup before sending notifications. 10607 */ 10608 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 10609 10610 synchronize_net(); 10611 err = 0; 10612 out: 10613 return err; 10614 } 10615 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 10616 10617 static int dev_cpu_dead(unsigned int oldcpu) 10618 { 10619 struct sk_buff **list_skb; 10620 struct sk_buff *skb; 10621 unsigned int cpu; 10622 struct softnet_data *sd, *oldsd, *remsd = NULL; 10623 10624 local_irq_disable(); 10625 cpu = smp_processor_id(); 10626 sd = &per_cpu(softnet_data, cpu); 10627 oldsd = &per_cpu(softnet_data, oldcpu); 10628 10629 /* Find end of our completion_queue. */ 10630 list_skb = &sd->completion_queue; 10631 while (*list_skb) 10632 list_skb = &(*list_skb)->next; 10633 /* Append completion queue from offline CPU. */ 10634 *list_skb = oldsd->completion_queue; 10635 oldsd->completion_queue = NULL; 10636 10637 /* Append output queue from offline CPU. */ 10638 if (oldsd->output_queue) { 10639 *sd->output_queue_tailp = oldsd->output_queue; 10640 sd->output_queue_tailp = oldsd->output_queue_tailp; 10641 oldsd->output_queue = NULL; 10642 oldsd->output_queue_tailp = &oldsd->output_queue; 10643 } 10644 /* Append NAPI poll list from offline CPU, with one exception : 10645 * process_backlog() must be called by cpu owning percpu backlog. 10646 * We properly handle process_queue & input_pkt_queue later. 10647 */ 10648 while (!list_empty(&oldsd->poll_list)) { 10649 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 10650 struct napi_struct, 10651 poll_list); 10652 10653 list_del_init(&napi->poll_list); 10654 if (napi->poll == process_backlog) 10655 napi->state = 0; 10656 else 10657 ____napi_schedule(sd, napi); 10658 } 10659 10660 raise_softirq_irqoff(NET_TX_SOFTIRQ); 10661 local_irq_enable(); 10662 10663 #ifdef CONFIG_RPS 10664 remsd = oldsd->rps_ipi_list; 10665 oldsd->rps_ipi_list = NULL; 10666 #endif 10667 /* send out pending IPI's on offline CPU */ 10668 net_rps_send_ipi(remsd); 10669 10670 /* Process offline CPU's input_pkt_queue */ 10671 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 10672 netif_rx_ni(skb); 10673 input_queue_head_incr(oldsd); 10674 } 10675 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 10676 netif_rx_ni(skb); 10677 input_queue_head_incr(oldsd); 10678 } 10679 10680 return 0; 10681 } 10682 10683 /** 10684 * netdev_increment_features - increment feature set by one 10685 * @all: current feature set 10686 * @one: new feature set 10687 * @mask: mask feature set 10688 * 10689 * Computes a new feature set after adding a device with feature set 10690 * @one to the master device with current feature set @all. Will not 10691 * enable anything that is off in @mask. Returns the new feature set. 10692 */ 10693 netdev_features_t netdev_increment_features(netdev_features_t all, 10694 netdev_features_t one, netdev_features_t mask) 10695 { 10696 if (mask & NETIF_F_HW_CSUM) 10697 mask |= NETIF_F_CSUM_MASK; 10698 mask |= NETIF_F_VLAN_CHALLENGED; 10699 10700 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 10701 all &= one | ~NETIF_F_ALL_FOR_ALL; 10702 10703 /* If one device supports hw checksumming, set for all. 
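 * Editorial example (not part of the original comment): if the folding
 * above left all == NETIF_F_HW_CSUM | NETIF_F_IP_CSUM, the statement
 * below clears NETIF_F_IP_CSUM and keeps only NETIF_F_HW_CSUM, since
 * hardware checksumming subsumes the protocol-specific checksum offloads.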
*/ 10704 if (all & NETIF_F_HW_CSUM) 10705 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 10706 10707 return all; 10708 } 10709 EXPORT_SYMBOL(netdev_increment_features); 10710 10711 static struct hlist_head * __net_init netdev_create_hash(void) 10712 { 10713 int i; 10714 struct hlist_head *hash; 10715 10716 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 10717 if (hash != NULL) 10718 for (i = 0; i < NETDEV_HASHENTRIES; i++) 10719 INIT_HLIST_HEAD(&hash[i]); 10720 10721 return hash; 10722 } 10723 10724 /* Initialize per network namespace state */ 10725 static int __net_init netdev_init(struct net *net) 10726 { 10727 BUILD_BUG_ON(GRO_HASH_BUCKETS > 10728 8 * sizeof_field(struct napi_struct, gro_bitmask)); 10729 10730 INIT_LIST_HEAD(&net->dev_base_head); 10731 10732 net->dev_name_head = netdev_create_hash(); 10733 if (net->dev_name_head == NULL) 10734 goto err_name; 10735 10736 net->dev_index_head = netdev_create_hash(); 10737 if (net->dev_index_head == NULL) 10738 goto err_idx; 10739 10740 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 10741 10742 return 0; 10743 10744 err_idx: 10745 kfree(net->dev_name_head); 10746 err_name: 10747 return -ENOMEM; 10748 } 10749 10750 /** 10751 * netdev_drivername - network driver for the device 10752 * @dev: network device 10753 * 10754 * Determine network driver for device. 10755 */ 10756 const char *netdev_drivername(const struct net_device *dev) 10757 { 10758 const struct device_driver *driver; 10759 const struct device *parent; 10760 const char *empty = ""; 10761 10762 parent = dev->dev.parent; 10763 if (!parent) 10764 return empty; 10765 10766 driver = parent->driver; 10767 if (driver && driver->name) 10768 return driver->name; 10769 return empty; 10770 } 10771 10772 static void __netdev_printk(const char *level, const struct net_device *dev, 10773 struct va_format *vaf) 10774 { 10775 if (dev && dev->dev.parent) { 10776 dev_printk_emit(level[1] - '0', 10777 dev->dev.parent, 10778 "%s %s %s%s: %pV", 10779 dev_driver_string(dev->dev.parent), 10780 dev_name(dev->dev.parent), 10781 netdev_name(dev), netdev_reg_state(dev), 10782 vaf); 10783 } else if (dev) { 10784 printk("%s%s%s: %pV", 10785 level, netdev_name(dev), netdev_reg_state(dev), vaf); 10786 } else { 10787 printk("%s(NULL net_device): %pV", level, vaf); 10788 } 10789 } 10790 10791 void netdev_printk(const char *level, const struct net_device *dev, 10792 const char *format, ...) 10793 { 10794 struct va_format vaf; 10795 va_list args; 10796 10797 va_start(args, format); 10798 10799 vaf.fmt = format; 10800 vaf.va = &args; 10801 10802 __netdev_printk(level, dev, &vaf); 10803 10804 va_end(args); 10805 } 10806 EXPORT_SYMBOL(netdev_printk); 10807 10808 #define define_netdev_printk_level(func, level) \ 10809 void func(const struct net_device *dev, const char *fmt, ...) 
\
10810 { \
10811 struct va_format vaf; \
10812 va_list args; \
10813 \
10814 va_start(args, fmt); \
10815 \
10816 vaf.fmt = fmt; \
10817 vaf.va = &args; \
10818 \
10819 __netdev_printk(level, dev, &vaf); \
10820 \
10821 va_end(args); \
10822 } \
10823 EXPORT_SYMBOL(func);
10824 
10825 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
10826 define_netdev_printk_level(netdev_alert, KERN_ALERT);
10827 define_netdev_printk_level(netdev_crit, KERN_CRIT);
10828 define_netdev_printk_level(netdev_err, KERN_ERR);
10829 define_netdev_printk_level(netdev_warn, KERN_WARNING);
10830 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
10831 define_netdev_printk_level(netdev_info, KERN_INFO);
10832 
10833 static void __net_exit netdev_exit(struct net *net)
10834 {
10835 kfree(net->dev_name_head);
10836 kfree(net->dev_index_head);
10837 if (net != &init_net)
10838 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
10839 }
10840 
10841 static struct pernet_operations __net_initdata netdev_net_ops = {
10842 .init = netdev_init,
10843 .exit = netdev_exit,
10844 };
10845 
10846 static void __net_exit default_device_exit_net(struct net *net)
10847 {
10848 struct net_device *dev, *aux;
10849 /*
10850 * Push all migratable network devices back to the
10851 * initial network namespace
10852 */
10853 ASSERT_RTNL();
10854 for_each_netdev_safe(net, dev, aux) {
10855 int err;
10856 char fb_name[IFNAMSIZ];
10857 
10858 /* Ignore unmovable devices (e.g. loopback) */
10859 if (dev->features & NETIF_F_NETNS_LOCAL)
10860 continue;
10861 
10862 /* Leave virtual devices for the generic cleanup */
10863 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
10864 continue;
10865 
10866 /* Push remaining network devices to init_net */
10867 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
10868 if (netdev_name_in_use(&init_net, fb_name))
10869 snprintf(fb_name, IFNAMSIZ, "dev%%d");
10870 err = dev_change_net_namespace(dev, &init_net, fb_name);
10871 if (err) {
10872 pr_emerg("%s: failed to move %s to init_net: %d\n",
10873 __func__, dev->name, err);
10874 BUG();
10875 }
10876 }
10877 }
10878 
10879 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
10880 {
10881 /* Return (with the rtnl_lock held) when there are no network
10882 * devices unregistering in any network namespace in net_list.
10883 */
10884 DEFINE_WAIT_FUNC(wait, woken_wake_function);
10885 bool unregistering;
10886 struct net *net;
10887 
10888 ASSERT_RTNL();
10889 add_wait_queue(&netdev_unregistering_wq, &wait);
10890 for (;;) {
10891 unregistering = false;
10892 
10893 list_for_each_entry(net, net_list, exit_list) {
10894 if (atomic_read(&net->dev_unreg_count) > 0) {
10895 unregistering = true;
10896 break;
10897 }
10898 }
10899 if (!unregistering)
10900 break;
10901 __rtnl_unlock();
10902 
10903 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
10904 rtnl_lock();
10905 }
10906 remove_wait_queue(&netdev_unregistering_wq, &wait);
10907 }
10908 
10909 static void __net_exit default_device_exit_batch(struct list_head *net_list)
10910 {
10911 /* At exit all network devices must be removed from a network
10912 * namespace. Do this in the reverse order of registration.
10913 * Do this across as many network namespaces as possible to
10914 * improve batching efficiency.
10915 */
10916 struct net_device *dev;
10917 struct net *net;
10918 LIST_HEAD(dev_kill_list);
10919 
10920 rtnl_lock();
10921 list_for_each_entry(net, net_list, exit_list) {
10922 default_device_exit_net(net);
10923 cond_resched();
10924 }
10925 /* To prevent network device cleanup code from dereferencing
10926 * loopback devices or network devices that have been freed,
10927 * wait here for all pending unregistrations to complete
10928 * before unregistering the loopback device and allowing the
10929 * network namespace to be freed.
10930 *
10931 * The netdev todo list, containing all network device
10932 * unregistrations that happen in default_device_exit_batch,
10933 * will run in the rtnl_unlock() at the end of
10934 * default_device_exit_batch.
10935 */
10936 rtnl_lock_unregistering(net_list);
10937 
10938 list_for_each_entry(net, net_list, exit_list) {
10939 for_each_netdev_reverse(net, dev) {
10940 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
10941 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
10942 else
10943 unregister_netdevice_queue(dev, &dev_kill_list);
10944 }
10945 }
10946 unregister_netdevice_many(&dev_kill_list);
10947 rtnl_unlock();
10948 }
10949 
10950 static struct pernet_operations __net_initdata default_device_ops = {
10951 .exit_batch = default_device_exit_batch,
10952 };
10953 
10954 /*
10955 * Initialize the DEV module. At boot time this walks the device list and
10956 * unhooks any devices that fail to initialise (normally hardware not
10957 * present) and leaves us with a valid list of present and active devices.
10958 *
10959 */
10960 
10961 /*
10962 * This is called single-threaded during boot, so no need
10963 * to take the rtnl semaphore.
10964 */
10965 static int __init net_dev_init(void)
10966 {
10967 int i, rc = -ENOMEM;
10968 
10969 BUG_ON(!dev_boot_phase);
10970 
10971 if (dev_proc_init())
10972 goto out;
10973 
10974 if (netdev_kobject_init())
10975 goto out;
10976 
10977 INIT_LIST_HEAD(&ptype_all);
10978 for (i = 0; i < PTYPE_HASH_SIZE; i++)
10979 INIT_LIST_HEAD(&ptype_base[i]);
10980 
10981 if (register_pernet_subsys(&netdev_net_ops))
10982 goto out;
10983 
10984 /*
10985 * Initialise the packet receive queues.
10986 */
10987 
10988 for_each_possible_cpu(i) {
10989 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
10990 struct softnet_data *sd = &per_cpu(softnet_data, i);
10991 
10992 INIT_WORK(flush, flush_backlog);
10993 
10994 skb_queue_head_init(&sd->input_pkt_queue);
10995 skb_queue_head_init(&sd->process_queue);
10996 #ifdef CONFIG_XFRM_OFFLOAD
10997 skb_queue_head_init(&sd->xfrm_backlog);
10998 #endif
10999 INIT_LIST_HEAD(&sd->poll_list);
11000 sd->output_queue_tailp = &sd->output_queue;
11001 #ifdef CONFIG_RPS
11002 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11003 sd->cpu = i;
11004 #endif
11005 
11006 init_gro_hash(&sd->backlog);
11007 sd->backlog.poll = process_backlog;
11008 sd->backlog.weight = weight_p;
11009 }
11010 
11011 dev_boot_phase = 0;
11012 
11013 /* The loopback device is special: if any other network device
11014 * is present in a network namespace, the loopback device must
11015 * be present too. Since we now dynamically allocate and free the
11016 * loopback device, ensure this invariant is maintained by
11017 * keeping the loopback device as the first device on the
11018 * list of network devices, so that it
11019 * is the first device that appears and the last network device
11020 * that disappears.
11021 */ 11022 if (register_pernet_device(&loopback_net_ops)) 11023 goto out; 11024 11025 if (register_pernet_device(&default_device_ops)) 11026 goto out; 11027 11028 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 11029 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 11030 11031 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 11032 NULL, dev_cpu_dead); 11033 WARN_ON(rc < 0); 11034 rc = 0; 11035 out: 11036 return rc; 11037 } 11038 11039 subsys_initcall(net_dev_init); 11040
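/*
 * Editorial example (not part of the original file): a minimal sketch of how
 * a hypothetical driver would combine the helpers above -- alloc_netdev_mqs()
 * for allocation, dev->tstats plus dev_get_tstats64() for statistics, and
 * unregister_netdev()/free_netdev() for teardown. All foo_* names are
 * invented for illustration, and the block is guarded by "#if 0" so it is
 * never built.
 */
#if 0
struct foo_priv {
	int id;			/* driver-private state would live here */
};

static const struct net_device_ops foo_netdev_ops = {
	/* dev_get_tstats64() folds dev->stats and the per-cpu dev->tstats */
	.ndo_get_stats64	= dev_get_tstats64,
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &foo_netdev_ops;
}

static int foo_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
			       NET_NAME_ENUM, foo_setup, 1, 1);
	if (!dev)
		return -ENOMEM;

	/* per-cpu counters read by dev_get_tstats64()/dev_fetch_sw_netstats() */
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats) {
		free_netdev(dev);
		return -ENOMEM;
	}

	err = register_netdev(dev);		/* takes RTNL internally */
	if (err) {
		free_percpu(dev->tstats);
		free_netdev(dev);
		return err;
	}
	return 0;
}

static void foo_destroy(struct net_device *dev)
{
	unregister_netdev(dev);			/* takes and drops RTNL */
	free_percpu(dev->tstats);
	free_netdev(dev);
}
#endif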