1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NET3 Protocol independent device support routines. 4 * 5 * Derived from the non IP parts of dev.c 1.0.19 6 * Authors: Ross Biro 7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 8 * Mark Evans, <evansmp@uhura.aston.ac.uk> 9 * 10 * Additional Authors: 11 * Florian la Roche <rzsfl@rz.uni-sb.de> 12 * Alan Cox <gw4pts@gw4pts.ampr.org> 13 * David Hinds <dahinds@users.sourceforge.net> 14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 15 * Adam Sulmicki <adam@cfar.umd.edu> 16 * Pekka Riikonen <priikone@poesidon.pspt.fi> 17 * 18 * Changes: 19 * D.J. Barrow : Fixed bug where dev->refcnt gets set 20 * to 2 if register_netdev gets called 21 * before net_dev_init & also removed a 22 * few lines of code in the process. 23 * Alan Cox : device private ioctl copies fields back. 24 * Alan Cox : Transmit queue code does relevant 25 * stunts to keep the queue safe. 26 * Alan Cox : Fixed double lock. 27 * Alan Cox : Fixed promisc NULL pointer trap 28 * ???????? : Support the full private ioctl range 29 * Alan Cox : Moved ioctl permission check into 30 * drivers 31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI 32 * Alan Cox : 100 backlog just doesn't cut it when 33 * you start doing multicast video 8) 34 * Alan Cox : Rewrote net_bh and list manager. 35 * Alan Cox : Fix ETH_P_ALL echoback lengths. 36 * Alan Cox : Took out transmit every packet pass 37 * Saved a few bytes in the ioctl handler 38 * Alan Cox : Network driver sets packet type before 39 * calling netif_rx. Saves a function 40 * call a packet. 41 * Alan Cox : Hashed net_bh() 42 * Richard Kooijman: Timestamp fixes. 43 * Alan Cox : Wrong field in SIOCGIFDSTADDR 44 * Alan Cox : Device lock protection. 45 * Alan Cox : Fixed nasty side effect of device close 46 * changes. 47 * Rudi Cilibrasi : Pass the right thing to 48 * set_mac_address() 49 * Dave Miller : 32bit quantity for the device lock to 50 * make it work out on a Sparc. 51 * Bjorn Ekwall : Added KERNELD hack. 52 * Alan Cox : Cleaned up the backlog initialise. 53 * Craig Metz : SIOCGIFCONF fix if space for under 54 * 1 device. 55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there 56 * is no device open function. 57 * Andi Kleen : Fix error reporting for SIOCGIFCONF 58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF 59 * Cyrus Durgin : Cleaned for KMOD 60 * Adam Sulmicki : Bug Fix : Network Device Unload 61 * A network device unload needs to purge 62 * the backlog queue. 63 * Paul Rusty Russell : SIOCSIFNAME 64 * Pekka Riikonen : Netdev boot-time settings code 65 * Andrew Morton : Make unregister_netdevice wait 66 * indefinitely on dev->refcnt 67 * J Hadi Salim : - Backlog queue sampling 68 * - netif_rx() feedback 69 */ 70 71 #include <linux/uaccess.h> 72 #include <linux/bitops.h> 73 #include <linux/capability.h> 74 #include <linux/cpu.h> 75 #include <linux/types.h> 76 #include <linux/kernel.h> 77 #include <linux/hash.h> 78 #include <linux/slab.h> 79 #include <linux/sched.h> 80 #include <linux/sched/mm.h> 81 #include <linux/mutex.h> 82 #include <linux/rwsem.h> 83 #include <linux/string.h> 84 #include <linux/mm.h> 85 #include <linux/socket.h> 86 #include <linux/sockios.h> 87 #include <linux/errno.h> 88 #include <linux/interrupt.h> 89 #include <linux/if_ether.h> 90 #include <linux/netdevice.h> 91 #include <linux/etherdevice.h> 92 #include <linux/ethtool.h> 93 #include <linux/skbuff.h> 94 #include <linux/kthread.h> 95 #include <linux/bpf.h> 96 #include <linux/bpf_trace.h> 97 #include <net/net_namespace.h> 98 #include <net/sock.h> 99 #include <net/busy_poll.h> 100 #include <linux/rtnetlink.h> 101 #include <linux/stat.h> 102 #include <net/dsa.h> 103 #include <net/dst.h> 104 #include <net/dst_metadata.h> 105 #include <net/gro.h> 106 #include <net/pkt_sched.h> 107 #include <net/pkt_cls.h> 108 #include <net/checksum.h> 109 #include <net/xfrm.h> 110 #include <linux/highmem.h> 111 #include <linux/init.h> 112 #include <linux/module.h> 113 #include <linux/netpoll.h> 114 #include <linux/rcupdate.h> 115 #include <linux/delay.h> 116 #include <net/iw_handler.h> 117 #include <asm/current.h> 118 #include <linux/audit.h> 119 #include <linux/dmaengine.h> 120 #include <linux/err.h> 121 #include <linux/ctype.h> 122 #include <linux/if_arp.h> 123 #include <linux/if_vlan.h> 124 #include <linux/ip.h> 125 #include <net/ip.h> 126 #include <net/mpls.h> 127 #include <linux/ipv6.h> 128 #include <linux/in.h> 129 #include <linux/jhash.h> 130 #include <linux/random.h> 131 #include <trace/events/napi.h> 132 #include <trace/events/net.h> 133 #include <trace/events/skb.h> 134 #include <trace/events/qdisc.h> 135 #include <linux/inetdevice.h> 136 #include <linux/cpu_rmap.h> 137 #include <linux/static_key.h> 138 #include <linux/hashtable.h> 139 #include <linux/vmalloc.h> 140 #include <linux/if_macvlan.h> 141 #include <linux/errqueue.h> 142 #include <linux/hrtimer.h> 143 #include <linux/netfilter_ingress.h> 144 #include <linux/crash_dump.h> 145 #include <linux/sctp.h> 146 #include <net/udp_tunnel.h> 147 #include <linux/net_namespace.h> 148 #include <linux/indirect_call_wrapper.h> 149 #include <net/devlink.h> 150 #include <linux/pm_runtime.h> 151 #include <linux/prandom.h> 152 #include <linux/once_lite.h> 153 154 #include "net-sysfs.h" 155 156 #define MAX_GRO_SKBS 8 157 158 /* This should be increased if a protocol with a bigger head is added. */ 159 #define GRO_MAX_HEAD (MAX_HEADER + 128) 160 161 static DEFINE_SPINLOCK(ptype_lock); 162 static DEFINE_SPINLOCK(offload_lock); 163 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 164 struct list_head ptype_all __read_mostly; /* Taps */ 165 static struct list_head offload_base __read_mostly; 166 167 static int netif_rx_internal(struct sk_buff *skb); 168 static int call_netdevice_notifiers_info(unsigned long val, 169 struct netdev_notifier_info *info); 170 static int call_netdevice_notifiers_extack(unsigned long val, 171 struct net_device *dev, 172 struct netlink_ext_ack *extack); 173 static struct napi_struct *napi_by_id(unsigned int napi_id); 174 175 /* 176 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 177 * semaphore. 178 * 179 * Pure readers hold dev_base_lock for reading, or rcu_read_lock() 180 * 181 * Writers must hold the rtnl semaphore while they loop through the 182 * dev_base_head list, and hold dev_base_lock for writing when they do the 183 * actual updates. This allows pure readers to access the list even 184 * while a writer is preparing to update it. 185 * 186 * To put it another way, dev_base_lock is held for writing only to 187 * protect against pure readers; the rtnl semaphore provides the 188 * protection against other writers. 189 * 190 * See, for example usages, register_netdevice() and 191 * unregister_netdevice(), which must be called with the rtnl 192 * semaphore held. 193 */ 194 DEFINE_RWLOCK(dev_base_lock); 195 EXPORT_SYMBOL(dev_base_lock); 196 197 static DEFINE_MUTEX(ifalias_mutex); 198 199 /* protects napi_hash addition/deletion and napi_gen_id */ 200 static DEFINE_SPINLOCK(napi_hash_lock); 201 202 static unsigned int napi_gen_id = NR_CPUS; 203 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 204 205 static DECLARE_RWSEM(devnet_rename_sem); 206 207 static inline void dev_base_seq_inc(struct net *net) 208 { 209 while (++net->dev_base_seq == 0) 210 ; 211 } 212 213 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 214 { 215 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 216 217 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 218 } 219 220 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 221 { 222 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 223 } 224 225 static inline void rps_lock(struct softnet_data *sd) 226 { 227 #ifdef CONFIG_RPS 228 spin_lock(&sd->input_pkt_queue.lock); 229 #endif 230 } 231 232 static inline void rps_unlock(struct softnet_data *sd) 233 { 234 #ifdef CONFIG_RPS 235 spin_unlock(&sd->input_pkt_queue.lock); 236 #endif 237 } 238 239 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev, 240 const char *name) 241 { 242 struct netdev_name_node *name_node; 243 244 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL); 245 if (!name_node) 246 return NULL; 247 INIT_HLIST_NODE(&name_node->hlist); 248 name_node->dev = dev; 249 name_node->name = name; 250 return name_node; 251 } 252 253 static struct netdev_name_node * 254 netdev_name_node_head_alloc(struct net_device *dev) 255 { 256 struct netdev_name_node *name_node; 257 258 name_node = netdev_name_node_alloc(dev, dev->name); 259 if (!name_node) 260 return NULL; 261 INIT_LIST_HEAD(&name_node->list); 262 return name_node; 263 } 264 265 static void netdev_name_node_free(struct netdev_name_node *name_node) 266 { 267 kfree(name_node); 268 } 269 270 static void netdev_name_node_add(struct net *net, 271 struct netdev_name_node *name_node) 272 { 273 hlist_add_head_rcu(&name_node->hlist, 274 dev_name_hash(net, name_node->name)); 275 } 276 277 static void netdev_name_node_del(struct netdev_name_node *name_node) 278 { 279 hlist_del_rcu(&name_node->hlist); 280 } 281 282 static struct netdev_name_node *netdev_name_node_lookup(struct net *net, 283 const char *name) 284 { 285 struct hlist_head *head = dev_name_hash(net, name); 286 struct netdev_name_node *name_node; 287 288 hlist_for_each_entry(name_node, head, hlist) 289 if (!strcmp(name_node->name, name)) 290 return name_node; 291 return NULL; 292 } 293 294 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net, 295 const char *name) 296 { 297 struct hlist_head *head = dev_name_hash(net, name); 298 struct netdev_name_node *name_node; 299 300 hlist_for_each_entry_rcu(name_node, head, hlist) 301 if (!strcmp(name_node->name, name)) 302 return name_node; 303 return NULL; 304 } 305 306 int netdev_name_node_alt_create(struct net_device *dev, const char *name) 307 { 308 struct netdev_name_node *name_node; 309 struct net *net = dev_net(dev); 310 311 name_node = netdev_name_node_lookup(net, name); 312 if (name_node) 313 return -EEXIST; 314 name_node = netdev_name_node_alloc(dev, name); 315 if (!name_node) 316 return -ENOMEM; 317 netdev_name_node_add(net, name_node); 318 /* The node that holds dev->name acts as a head of per-device list. */ 319 list_add_tail(&name_node->list, &dev->name_node->list); 320 321 return 0; 322 } 323 EXPORT_SYMBOL(netdev_name_node_alt_create); 324 325 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node) 326 { 327 list_del(&name_node->list); 328 netdev_name_node_del(name_node); 329 kfree(name_node->name); 330 netdev_name_node_free(name_node); 331 } 332 333 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) 334 { 335 struct netdev_name_node *name_node; 336 struct net *net = dev_net(dev); 337 338 name_node = netdev_name_node_lookup(net, name); 339 if (!name_node) 340 return -ENOENT; 341 /* lookup might have found our primary name or a name belonging 342 * to another device. 343 */ 344 if (name_node == dev->name_node || name_node->dev != dev) 345 return -EINVAL; 346 347 __netdev_name_node_alt_destroy(name_node); 348 349 return 0; 350 } 351 EXPORT_SYMBOL(netdev_name_node_alt_destroy); 352 353 static void netdev_name_node_alt_flush(struct net_device *dev) 354 { 355 struct netdev_name_node *name_node, *tmp; 356 357 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) 358 __netdev_name_node_alt_destroy(name_node); 359 } 360 361 /* Device list insertion */ 362 static void list_netdevice(struct net_device *dev) 363 { 364 struct net *net = dev_net(dev); 365 366 ASSERT_RTNL(); 367 368 write_lock_bh(&dev_base_lock); 369 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 370 netdev_name_node_add(net, dev->name_node); 371 hlist_add_head_rcu(&dev->index_hlist, 372 dev_index_hash(net, dev->ifindex)); 373 write_unlock_bh(&dev_base_lock); 374 375 dev_base_seq_inc(net); 376 } 377 378 /* Device list removal 379 * caller must respect a RCU grace period before freeing/reusing dev 380 */ 381 static void unlist_netdevice(struct net_device *dev) 382 { 383 ASSERT_RTNL(); 384 385 /* Unlink dev from the device chain */ 386 write_lock_bh(&dev_base_lock); 387 list_del_rcu(&dev->dev_list); 388 netdev_name_node_del(dev->name_node); 389 hlist_del_rcu(&dev->index_hlist); 390 write_unlock_bh(&dev_base_lock); 391 392 dev_base_seq_inc(dev_net(dev)); 393 } 394 395 /* 396 * Our notifier list 397 */ 398 399 static RAW_NOTIFIER_HEAD(netdev_chain); 400 401 /* 402 * Device drivers call our routines to queue packets here. We empty the 403 * queue in the local softnet handler. 404 */ 405 406 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 407 EXPORT_PER_CPU_SYMBOL(softnet_data); 408 409 #ifdef CONFIG_LOCKDEP 410 /* 411 * register_netdevice() inits txq->_xmit_lock and sets lockdep class 412 * according to dev->type 413 */ 414 static const unsigned short netdev_lock_type[] = { 415 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, 416 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, 417 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, 418 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, 419 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, 420 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, 421 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, 422 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, 423 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, 424 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, 425 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, 426 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 427 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, 428 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, 429 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; 430 431 static const char *const netdev_lock_name[] = { 432 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 433 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 434 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 435 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", 436 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", 437 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", 438 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", 439 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", 440 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", 441 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", 442 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", 443 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 444 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", 445 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", 446 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; 447 448 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 449 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 450 451 static inline unsigned short netdev_lock_pos(unsigned short dev_type) 452 { 453 int i; 454 455 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) 456 if (netdev_lock_type[i] == dev_type) 457 return i; 458 /* the last key is used by default */ 459 return ARRAY_SIZE(netdev_lock_type) - 1; 460 } 461 462 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 463 unsigned short dev_type) 464 { 465 int i; 466 467 i = netdev_lock_pos(dev_type); 468 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], 469 netdev_lock_name[i]); 470 } 471 472 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 473 { 474 int i; 475 476 i = netdev_lock_pos(dev->type); 477 lockdep_set_class_and_name(&dev->addr_list_lock, 478 &netdev_addr_lock_key[i], 479 netdev_lock_name[i]); 480 } 481 #else 482 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, 483 unsigned short dev_type) 484 { 485 } 486 487 static inline void netdev_set_addr_lockdep_class(struct net_device *dev) 488 { 489 } 490 #endif 491 492 /******************************************************************************* 493 * 494 * Protocol management and registration routines 495 * 496 *******************************************************************************/ 497 498 499 /* 500 * Add a protocol ID to the list. Now that the input handler is 501 * smarter we can dispense with all the messy stuff that used to be 502 * here. 503 * 504 * BEWARE!!! Protocol handlers, mangling input packets, 505 * MUST BE last in hash buckets and checking protocol handlers 506 * MUST start from promiscuous ptype_all chain in net_bh. 507 * It is true now, do not change it. 508 * Explanation follows: if protocol handler, mangling packet, will 509 * be the first on list, it is not able to sense, that packet 510 * is cloned and should be copied-on-write, so that it will 511 * change it and subsequent readers will get broken packet. 512 * --ANK (980803) 513 */ 514 515 static inline struct list_head *ptype_head(const struct packet_type *pt) 516 { 517 if (pt->type == htons(ETH_P_ALL)) 518 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 519 else 520 return pt->dev ? &pt->dev->ptype_specific : 521 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 522 } 523 524 /** 525 * dev_add_pack - add packet handler 526 * @pt: packet type declaration 527 * 528 * Add a protocol handler to the networking stack. The passed &packet_type 529 * is linked into kernel lists and may not be freed until it has been 530 * removed from the kernel lists. 531 * 532 * This call does not sleep therefore it can not 533 * guarantee all CPU's that are in middle of receiving packets 534 * will see the new packet type (until the next received packet). 535 */ 536 537 void dev_add_pack(struct packet_type *pt) 538 { 539 struct list_head *head = ptype_head(pt); 540 541 spin_lock(&ptype_lock); 542 list_add_rcu(&pt->list, head); 543 spin_unlock(&ptype_lock); 544 } 545 EXPORT_SYMBOL(dev_add_pack); 546 547 /** 548 * __dev_remove_pack - remove packet handler 549 * @pt: packet type declaration 550 * 551 * Remove a protocol handler that was previously added to the kernel 552 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 553 * from the kernel lists and can be freed or reused once this function 554 * returns. 555 * 556 * The packet type might still be in use by receivers 557 * and must not be freed until after all the CPU's have gone 558 * through a quiescent state. 559 */ 560 void __dev_remove_pack(struct packet_type *pt) 561 { 562 struct list_head *head = ptype_head(pt); 563 struct packet_type *pt1; 564 565 spin_lock(&ptype_lock); 566 567 list_for_each_entry(pt1, head, list) { 568 if (pt == pt1) { 569 list_del_rcu(&pt->list); 570 goto out; 571 } 572 } 573 574 pr_warn("dev_remove_pack: %p not found\n", pt); 575 out: 576 spin_unlock(&ptype_lock); 577 } 578 EXPORT_SYMBOL(__dev_remove_pack); 579 580 /** 581 * dev_remove_pack - remove packet handler 582 * @pt: packet type declaration 583 * 584 * Remove a protocol handler that was previously added to the kernel 585 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 586 * from the kernel lists and can be freed or reused once this function 587 * returns. 588 * 589 * This call sleeps to guarantee that no CPU is looking at the packet 590 * type after return. 591 */ 592 void dev_remove_pack(struct packet_type *pt) 593 { 594 __dev_remove_pack(pt); 595 596 synchronize_net(); 597 } 598 EXPORT_SYMBOL(dev_remove_pack); 599 600 601 /** 602 * dev_add_offload - register offload handlers 603 * @po: protocol offload declaration 604 * 605 * Add protocol offload handlers to the networking stack. The passed 606 * &proto_offload is linked into kernel lists and may not be freed until 607 * it has been removed from the kernel lists. 608 * 609 * This call does not sleep therefore it can not 610 * guarantee all CPU's that are in middle of receiving packets 611 * will see the new offload handlers (until the next received packet). 612 */ 613 void dev_add_offload(struct packet_offload *po) 614 { 615 struct packet_offload *elem; 616 617 spin_lock(&offload_lock); 618 list_for_each_entry(elem, &offload_base, list) { 619 if (po->priority < elem->priority) 620 break; 621 } 622 list_add_rcu(&po->list, elem->list.prev); 623 spin_unlock(&offload_lock); 624 } 625 EXPORT_SYMBOL(dev_add_offload); 626 627 /** 628 * __dev_remove_offload - remove offload handler 629 * @po: packet offload declaration 630 * 631 * Remove a protocol offload handler that was previously added to the 632 * kernel offload handlers by dev_add_offload(). The passed &offload_type 633 * is removed from the kernel lists and can be freed or reused once this 634 * function returns. 635 * 636 * The packet type might still be in use by receivers 637 * and must not be freed until after all the CPU's have gone 638 * through a quiescent state. 639 */ 640 static void __dev_remove_offload(struct packet_offload *po) 641 { 642 struct list_head *head = &offload_base; 643 struct packet_offload *po1; 644 645 spin_lock(&offload_lock); 646 647 list_for_each_entry(po1, head, list) { 648 if (po == po1) { 649 list_del_rcu(&po->list); 650 goto out; 651 } 652 } 653 654 pr_warn("dev_remove_offload: %p not found\n", po); 655 out: 656 spin_unlock(&offload_lock); 657 } 658 659 /** 660 * dev_remove_offload - remove packet offload handler 661 * @po: packet offload declaration 662 * 663 * Remove a packet offload handler that was previously added to the kernel 664 * offload handlers by dev_add_offload(). The passed &offload_type is 665 * removed from the kernel lists and can be freed or reused once this 666 * function returns. 667 * 668 * This call sleeps to guarantee that no CPU is looking at the packet 669 * type after return. 670 */ 671 void dev_remove_offload(struct packet_offload *po) 672 { 673 __dev_remove_offload(po); 674 675 synchronize_net(); 676 } 677 EXPORT_SYMBOL(dev_remove_offload); 678 679 /******************************************************************************* 680 * 681 * Device Interface Subroutines 682 * 683 *******************************************************************************/ 684 685 /** 686 * dev_get_iflink - get 'iflink' value of a interface 687 * @dev: targeted interface 688 * 689 * Indicates the ifindex the interface is linked to. 690 * Physical interfaces have the same 'ifindex' and 'iflink' values. 691 */ 692 693 int dev_get_iflink(const struct net_device *dev) 694 { 695 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 696 return dev->netdev_ops->ndo_get_iflink(dev); 697 698 return dev->ifindex; 699 } 700 EXPORT_SYMBOL(dev_get_iflink); 701 702 /** 703 * dev_fill_metadata_dst - Retrieve tunnel egress information. 704 * @dev: targeted interface 705 * @skb: The packet. 706 * 707 * For better visibility of tunnel traffic OVS needs to retrieve 708 * egress tunnel information for a packet. Following API allows 709 * user to get this info. 710 */ 711 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 712 { 713 struct ip_tunnel_info *info; 714 715 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 716 return -EINVAL; 717 718 info = skb_tunnel_info_unclone(skb); 719 if (!info) 720 return -ENOMEM; 721 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 722 return -EINVAL; 723 724 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 725 } 726 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 727 728 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack) 729 { 730 int k = stack->num_paths++; 731 732 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX)) 733 return NULL; 734 735 return &stack->path[k]; 736 } 737 738 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, 739 struct net_device_path_stack *stack) 740 { 741 const struct net_device *last_dev; 742 struct net_device_path_ctx ctx = { 743 .dev = dev, 744 .daddr = daddr, 745 }; 746 struct net_device_path *path; 747 int ret = 0; 748 749 stack->num_paths = 0; 750 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) { 751 last_dev = ctx.dev; 752 path = dev_fwd_path(stack); 753 if (!path) 754 return -1; 755 756 memset(path, 0, sizeof(struct net_device_path)); 757 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path); 758 if (ret < 0) 759 return -1; 760 761 if (WARN_ON_ONCE(last_dev == ctx.dev)) 762 return -1; 763 } 764 path = dev_fwd_path(stack); 765 if (!path) 766 return -1; 767 path->type = DEV_PATH_ETHERNET; 768 path->dev = ctx.dev; 769 770 return ret; 771 } 772 EXPORT_SYMBOL_GPL(dev_fill_forward_path); 773 774 /** 775 * __dev_get_by_name - find a device by its name 776 * @net: the applicable net namespace 777 * @name: name to find 778 * 779 * Find an interface by name. Must be called under RTNL semaphore 780 * or @dev_base_lock. If the name is found a pointer to the device 781 * is returned. If the name is not found then %NULL is returned. The 782 * reference counters are not incremented so the caller must be 783 * careful with locks. 784 */ 785 786 struct net_device *__dev_get_by_name(struct net *net, const char *name) 787 { 788 struct netdev_name_node *node_name; 789 790 node_name = netdev_name_node_lookup(net, name); 791 return node_name ? node_name->dev : NULL; 792 } 793 EXPORT_SYMBOL(__dev_get_by_name); 794 795 /** 796 * dev_get_by_name_rcu - find a device by its name 797 * @net: the applicable net namespace 798 * @name: name to find 799 * 800 * Find an interface by name. 801 * If the name is found a pointer to the device is returned. 802 * If the name is not found then %NULL is returned. 803 * The reference counters are not incremented so the caller must be 804 * careful with locks. The caller must hold RCU lock. 805 */ 806 807 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 808 { 809 struct netdev_name_node *node_name; 810 811 node_name = netdev_name_node_lookup_rcu(net, name); 812 return node_name ? node_name->dev : NULL; 813 } 814 EXPORT_SYMBOL(dev_get_by_name_rcu); 815 816 /** 817 * dev_get_by_name - find a device by its name 818 * @net: the applicable net namespace 819 * @name: name to find 820 * 821 * Find an interface by name. This can be called from any 822 * context and does its own locking. The returned handle has 823 * the usage count incremented and the caller must use dev_put() to 824 * release it when it is no longer needed. %NULL is returned if no 825 * matching device is found. 826 */ 827 828 struct net_device *dev_get_by_name(struct net *net, const char *name) 829 { 830 struct net_device *dev; 831 832 rcu_read_lock(); 833 dev = dev_get_by_name_rcu(net, name); 834 dev_hold(dev); 835 rcu_read_unlock(); 836 return dev; 837 } 838 EXPORT_SYMBOL(dev_get_by_name); 839 840 /** 841 * __dev_get_by_index - find a device by its ifindex 842 * @net: the applicable net namespace 843 * @ifindex: index of device 844 * 845 * Search for an interface by index. Returns %NULL if the device 846 * is not found or a pointer to the device. The device has not 847 * had its reference counter increased so the caller must be careful 848 * about locking. The caller must hold either the RTNL semaphore 849 * or @dev_base_lock. 850 */ 851 852 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 853 { 854 struct net_device *dev; 855 struct hlist_head *head = dev_index_hash(net, ifindex); 856 857 hlist_for_each_entry(dev, head, index_hlist) 858 if (dev->ifindex == ifindex) 859 return dev; 860 861 return NULL; 862 } 863 EXPORT_SYMBOL(__dev_get_by_index); 864 865 /** 866 * dev_get_by_index_rcu - find a device by its ifindex 867 * @net: the applicable net namespace 868 * @ifindex: index of device 869 * 870 * Search for an interface by index. Returns %NULL if the device 871 * is not found or a pointer to the device. The device has not 872 * had its reference counter increased so the caller must be careful 873 * about locking. The caller must hold RCU lock. 874 */ 875 876 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 877 { 878 struct net_device *dev; 879 struct hlist_head *head = dev_index_hash(net, ifindex); 880 881 hlist_for_each_entry_rcu(dev, head, index_hlist) 882 if (dev->ifindex == ifindex) 883 return dev; 884 885 return NULL; 886 } 887 EXPORT_SYMBOL(dev_get_by_index_rcu); 888 889 890 /** 891 * dev_get_by_index - find a device by its ifindex 892 * @net: the applicable net namespace 893 * @ifindex: index of device 894 * 895 * Search for an interface by index. Returns NULL if the device 896 * is not found or a pointer to the device. The device returned has 897 * had a reference added and the pointer is safe until the user calls 898 * dev_put to indicate they have finished with it. 899 */ 900 901 struct net_device *dev_get_by_index(struct net *net, int ifindex) 902 { 903 struct net_device *dev; 904 905 rcu_read_lock(); 906 dev = dev_get_by_index_rcu(net, ifindex); 907 dev_hold(dev); 908 rcu_read_unlock(); 909 return dev; 910 } 911 EXPORT_SYMBOL(dev_get_by_index); 912 913 /** 914 * dev_get_by_napi_id - find a device by napi_id 915 * @napi_id: ID of the NAPI struct 916 * 917 * Search for an interface by NAPI ID. Returns %NULL if the device 918 * is not found or a pointer to the device. The device has not had 919 * its reference counter increased so the caller must be careful 920 * about locking. The caller must hold RCU lock. 921 */ 922 923 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 924 { 925 struct napi_struct *napi; 926 927 WARN_ON_ONCE(!rcu_read_lock_held()); 928 929 if (napi_id < MIN_NAPI_ID) 930 return NULL; 931 932 napi = napi_by_id(napi_id); 933 934 return napi ? napi->dev : NULL; 935 } 936 EXPORT_SYMBOL(dev_get_by_napi_id); 937 938 /** 939 * netdev_get_name - get a netdevice name, knowing its ifindex. 940 * @net: network namespace 941 * @name: a pointer to the buffer where the name will be stored. 942 * @ifindex: the ifindex of the interface to get the name from. 943 */ 944 int netdev_get_name(struct net *net, char *name, int ifindex) 945 { 946 struct net_device *dev; 947 int ret; 948 949 down_read(&devnet_rename_sem); 950 rcu_read_lock(); 951 952 dev = dev_get_by_index_rcu(net, ifindex); 953 if (!dev) { 954 ret = -ENODEV; 955 goto out; 956 } 957 958 strcpy(name, dev->name); 959 960 ret = 0; 961 out: 962 rcu_read_unlock(); 963 up_read(&devnet_rename_sem); 964 return ret; 965 } 966 967 /** 968 * dev_getbyhwaddr_rcu - find a device by its hardware address 969 * @net: the applicable net namespace 970 * @type: media type of device 971 * @ha: hardware address 972 * 973 * Search for an interface by MAC address. Returns NULL if the device 974 * is not found or a pointer to the device. 975 * The caller must hold RCU or RTNL. 976 * The returned device has not had its ref count increased 977 * and the caller must therefore be careful about locking 978 * 979 */ 980 981 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 982 const char *ha) 983 { 984 struct net_device *dev; 985 986 for_each_netdev_rcu(net, dev) 987 if (dev->type == type && 988 !memcmp(dev->dev_addr, ha, dev->addr_len)) 989 return dev; 990 991 return NULL; 992 } 993 EXPORT_SYMBOL(dev_getbyhwaddr_rcu); 994 995 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 996 { 997 struct net_device *dev, *ret = NULL; 998 999 rcu_read_lock(); 1000 for_each_netdev_rcu(net, dev) 1001 if (dev->type == type) { 1002 dev_hold(dev); 1003 ret = dev; 1004 break; 1005 } 1006 rcu_read_unlock(); 1007 return ret; 1008 } 1009 EXPORT_SYMBOL(dev_getfirstbyhwtype); 1010 1011 /** 1012 * __dev_get_by_flags - find any device with given flags 1013 * @net: the applicable net namespace 1014 * @if_flags: IFF_* values 1015 * @mask: bitmask of bits in if_flags to check 1016 * 1017 * Search for any interface with the given flags. Returns NULL if a device 1018 * is not found or a pointer to the device. Must be called inside 1019 * rtnl_lock(), and result refcount is unchanged. 1020 */ 1021 1022 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, 1023 unsigned short mask) 1024 { 1025 struct net_device *dev, *ret; 1026 1027 ASSERT_RTNL(); 1028 1029 ret = NULL; 1030 for_each_netdev(net, dev) { 1031 if (((dev->flags ^ if_flags) & mask) == 0) { 1032 ret = dev; 1033 break; 1034 } 1035 } 1036 return ret; 1037 } 1038 EXPORT_SYMBOL(__dev_get_by_flags); 1039 1040 /** 1041 * dev_valid_name - check if name is okay for network device 1042 * @name: name string 1043 * 1044 * Network device names need to be valid file names to 1045 * allow sysfs to work. We also disallow any kind of 1046 * whitespace. 1047 */ 1048 bool dev_valid_name(const char *name) 1049 { 1050 if (*name == '\0') 1051 return false; 1052 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1053 return false; 1054 if (!strcmp(name, ".") || !strcmp(name, "..")) 1055 return false; 1056 1057 while (*name) { 1058 if (*name == '/' || *name == ':' || isspace(*name)) 1059 return false; 1060 name++; 1061 } 1062 return true; 1063 } 1064 EXPORT_SYMBOL(dev_valid_name); 1065 1066 /** 1067 * __dev_alloc_name - allocate a name for a device 1068 * @net: network namespace to allocate the device name in 1069 * @name: name format string 1070 * @buf: scratch buffer and result name string 1071 * 1072 * Passed a format string - eg "lt%d" it will try and find a suitable 1073 * id. It scans list of devices to build up a free map, then chooses 1074 * the first empty slot. The caller must hold the dev_base or rtnl lock 1075 * while allocating the name and adding the device in order to avoid 1076 * duplicates. 1077 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1078 * Returns the number of the unit assigned or a negative errno code. 1079 */ 1080 1081 static int __dev_alloc_name(struct net *net, const char *name, char *buf) 1082 { 1083 int i = 0; 1084 const char *p; 1085 const int max_netdevices = 8*PAGE_SIZE; 1086 unsigned long *inuse; 1087 struct net_device *d; 1088 1089 if (!dev_valid_name(name)) 1090 return -EINVAL; 1091 1092 p = strchr(name, '%'); 1093 if (p) { 1094 /* 1095 * Verify the string as this thing may have come from 1096 * the user. There must be either one "%d" and no other "%" 1097 * characters. 1098 */ 1099 if (p[1] != 'd' || strchr(p + 2, '%')) 1100 return -EINVAL; 1101 1102 /* Use one page as a bit array of possible slots */ 1103 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 1104 if (!inuse) 1105 return -ENOMEM; 1106 1107 for_each_netdev(net, d) { 1108 struct netdev_name_node *name_node; 1109 list_for_each_entry(name_node, &d->name_node->list, list) { 1110 if (!sscanf(name_node->name, name, &i)) 1111 continue; 1112 if (i < 0 || i >= max_netdevices) 1113 continue; 1114 1115 /* avoid cases where sscanf is not exact inverse of printf */ 1116 snprintf(buf, IFNAMSIZ, name, i); 1117 if (!strncmp(buf, name_node->name, IFNAMSIZ)) 1118 set_bit(i, inuse); 1119 } 1120 if (!sscanf(d->name, name, &i)) 1121 continue; 1122 if (i < 0 || i >= max_netdevices) 1123 continue; 1124 1125 /* avoid cases where sscanf is not exact inverse of printf */ 1126 snprintf(buf, IFNAMSIZ, name, i); 1127 if (!strncmp(buf, d->name, IFNAMSIZ)) 1128 set_bit(i, inuse); 1129 } 1130 1131 i = find_first_zero_bit(inuse, max_netdevices); 1132 free_page((unsigned long) inuse); 1133 } 1134 1135 snprintf(buf, IFNAMSIZ, name, i); 1136 if (!__dev_get_by_name(net, buf)) 1137 return i; 1138 1139 /* It is possible to run out of possible slots 1140 * when the name is long and there isn't enough space left 1141 * for the digits, or if all bits are used. 1142 */ 1143 return -ENFILE; 1144 } 1145 1146 static int dev_alloc_name_ns(struct net *net, 1147 struct net_device *dev, 1148 const char *name) 1149 { 1150 char buf[IFNAMSIZ]; 1151 int ret; 1152 1153 BUG_ON(!net); 1154 ret = __dev_alloc_name(net, name, buf); 1155 if (ret >= 0) 1156 strlcpy(dev->name, buf, IFNAMSIZ); 1157 return ret; 1158 } 1159 1160 /** 1161 * dev_alloc_name - allocate a name for a device 1162 * @dev: device 1163 * @name: name format string 1164 * 1165 * Passed a format string - eg "lt%d" it will try and find a suitable 1166 * id. It scans list of devices to build up a free map, then chooses 1167 * the first empty slot. The caller must hold the dev_base or rtnl lock 1168 * while allocating the name and adding the device in order to avoid 1169 * duplicates. 1170 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1171 * Returns the number of the unit assigned or a negative errno code. 1172 */ 1173 1174 int dev_alloc_name(struct net_device *dev, const char *name) 1175 { 1176 return dev_alloc_name_ns(dev_net(dev), dev, name); 1177 } 1178 EXPORT_SYMBOL(dev_alloc_name); 1179 1180 static int dev_get_valid_name(struct net *net, struct net_device *dev, 1181 const char *name) 1182 { 1183 BUG_ON(!net); 1184 1185 if (!dev_valid_name(name)) 1186 return -EINVAL; 1187 1188 if (strchr(name, '%')) 1189 return dev_alloc_name_ns(net, dev, name); 1190 else if (__dev_get_by_name(net, name)) 1191 return -EEXIST; 1192 else if (dev->name != name) 1193 strlcpy(dev->name, name, IFNAMSIZ); 1194 1195 return 0; 1196 } 1197 1198 /** 1199 * dev_change_name - change name of a device 1200 * @dev: device 1201 * @newname: name (or format string) must be at least IFNAMSIZ 1202 * 1203 * Change name of a device, can pass format strings "eth%d". 1204 * for wildcarding. 1205 */ 1206 int dev_change_name(struct net_device *dev, const char *newname) 1207 { 1208 unsigned char old_assign_type; 1209 char oldname[IFNAMSIZ]; 1210 int err = 0; 1211 int ret; 1212 struct net *net; 1213 1214 ASSERT_RTNL(); 1215 BUG_ON(!dev_net(dev)); 1216 1217 net = dev_net(dev); 1218 1219 /* Some auto-enslaved devices e.g. failover slaves are 1220 * special, as userspace might rename the device after 1221 * the interface had been brought up and running since 1222 * the point kernel initiated auto-enslavement. Allow 1223 * live name change even when these slave devices are 1224 * up and running. 1225 * 1226 * Typically, users of these auto-enslaving devices 1227 * don't actually care about slave name change, as 1228 * they are supposed to operate on master interface 1229 * directly. 1230 */ 1231 if (dev->flags & IFF_UP && 1232 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) 1233 return -EBUSY; 1234 1235 down_write(&devnet_rename_sem); 1236 1237 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1238 up_write(&devnet_rename_sem); 1239 return 0; 1240 } 1241 1242 memcpy(oldname, dev->name, IFNAMSIZ); 1243 1244 err = dev_get_valid_name(net, dev, newname); 1245 if (err < 0) { 1246 up_write(&devnet_rename_sem); 1247 return err; 1248 } 1249 1250 if (oldname[0] && !strchr(oldname, '%')) 1251 netdev_info(dev, "renamed from %s\n", oldname); 1252 1253 old_assign_type = dev->name_assign_type; 1254 dev->name_assign_type = NET_NAME_RENAMED; 1255 1256 rollback: 1257 ret = device_rename(&dev->dev, dev->name); 1258 if (ret) { 1259 memcpy(dev->name, oldname, IFNAMSIZ); 1260 dev->name_assign_type = old_assign_type; 1261 up_write(&devnet_rename_sem); 1262 return ret; 1263 } 1264 1265 up_write(&devnet_rename_sem); 1266 1267 netdev_adjacent_rename_links(dev, oldname); 1268 1269 write_lock_bh(&dev_base_lock); 1270 netdev_name_node_del(dev->name_node); 1271 write_unlock_bh(&dev_base_lock); 1272 1273 synchronize_rcu(); 1274 1275 write_lock_bh(&dev_base_lock); 1276 netdev_name_node_add(net, dev->name_node); 1277 write_unlock_bh(&dev_base_lock); 1278 1279 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1280 ret = notifier_to_errno(ret); 1281 1282 if (ret) { 1283 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1284 if (err >= 0) { 1285 err = ret; 1286 down_write(&devnet_rename_sem); 1287 memcpy(dev->name, oldname, IFNAMSIZ); 1288 memcpy(oldname, newname, IFNAMSIZ); 1289 dev->name_assign_type = old_assign_type; 1290 old_assign_type = NET_NAME_RENAMED; 1291 goto rollback; 1292 } else { 1293 pr_err("%s: name change rollback failed: %d\n", 1294 dev->name, ret); 1295 } 1296 } 1297 1298 return err; 1299 } 1300 1301 /** 1302 * dev_set_alias - change ifalias of a device 1303 * @dev: device 1304 * @alias: name up to IFALIASZ 1305 * @len: limit of bytes to copy from info 1306 * 1307 * Set ifalias for a device, 1308 */ 1309 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1310 { 1311 struct dev_ifalias *new_alias = NULL; 1312 1313 if (len >= IFALIASZ) 1314 return -EINVAL; 1315 1316 if (len) { 1317 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1318 if (!new_alias) 1319 return -ENOMEM; 1320 1321 memcpy(new_alias->ifalias, alias, len); 1322 new_alias->ifalias[len] = 0; 1323 } 1324 1325 mutex_lock(&ifalias_mutex); 1326 new_alias = rcu_replace_pointer(dev->ifalias, new_alias, 1327 mutex_is_locked(&ifalias_mutex)); 1328 mutex_unlock(&ifalias_mutex); 1329 1330 if (new_alias) 1331 kfree_rcu(new_alias, rcuhead); 1332 1333 return len; 1334 } 1335 EXPORT_SYMBOL(dev_set_alias); 1336 1337 /** 1338 * dev_get_alias - get ifalias of a device 1339 * @dev: device 1340 * @name: buffer to store name of ifalias 1341 * @len: size of buffer 1342 * 1343 * get ifalias for a device. Caller must make sure dev cannot go 1344 * away, e.g. rcu read lock or own a reference count to device. 1345 */ 1346 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1347 { 1348 const struct dev_ifalias *alias; 1349 int ret = 0; 1350 1351 rcu_read_lock(); 1352 alias = rcu_dereference(dev->ifalias); 1353 if (alias) 1354 ret = snprintf(name, len, "%s", alias->ifalias); 1355 rcu_read_unlock(); 1356 1357 return ret; 1358 } 1359 1360 /** 1361 * netdev_features_change - device changes features 1362 * @dev: device to cause notification 1363 * 1364 * Called to indicate a device has changed features. 1365 */ 1366 void netdev_features_change(struct net_device *dev) 1367 { 1368 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1369 } 1370 EXPORT_SYMBOL(netdev_features_change); 1371 1372 /** 1373 * netdev_state_change - device changes state 1374 * @dev: device to cause notification 1375 * 1376 * Called to indicate a device has changed state. This function calls 1377 * the notifier chains for netdev_chain and sends a NEWLINK message 1378 * to the routing socket. 1379 */ 1380 void netdev_state_change(struct net_device *dev) 1381 { 1382 if (dev->flags & IFF_UP) { 1383 struct netdev_notifier_change_info change_info = { 1384 .info.dev = dev, 1385 }; 1386 1387 call_netdevice_notifiers_info(NETDEV_CHANGE, 1388 &change_info.info); 1389 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1390 } 1391 } 1392 EXPORT_SYMBOL(netdev_state_change); 1393 1394 /** 1395 * __netdev_notify_peers - notify network peers about existence of @dev, 1396 * to be called when rtnl lock is already held. 1397 * @dev: network device 1398 * 1399 * Generate traffic such that interested network peers are aware of 1400 * @dev, such as by generating a gratuitous ARP. This may be used when 1401 * a device wants to inform the rest of the network about some sort of 1402 * reconfiguration such as a failover event or virtual machine 1403 * migration. 1404 */ 1405 void __netdev_notify_peers(struct net_device *dev) 1406 { 1407 ASSERT_RTNL(); 1408 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1409 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1410 } 1411 EXPORT_SYMBOL(__netdev_notify_peers); 1412 1413 /** 1414 * netdev_notify_peers - notify network peers about existence of @dev 1415 * @dev: network device 1416 * 1417 * Generate traffic such that interested network peers are aware of 1418 * @dev, such as by generating a gratuitous ARP. This may be used when 1419 * a device wants to inform the rest of the network about some sort of 1420 * reconfiguration such as a failover event or virtual machine 1421 * migration. 1422 */ 1423 void netdev_notify_peers(struct net_device *dev) 1424 { 1425 rtnl_lock(); 1426 __netdev_notify_peers(dev); 1427 rtnl_unlock(); 1428 } 1429 EXPORT_SYMBOL(netdev_notify_peers); 1430 1431 static int napi_threaded_poll(void *data); 1432 1433 static int napi_kthread_create(struct napi_struct *n) 1434 { 1435 int err = 0; 1436 1437 /* Create and wake up the kthread once to put it in 1438 * TASK_INTERRUPTIBLE mode to avoid the blocked task 1439 * warning and work with loadavg. 1440 */ 1441 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", 1442 n->dev->name, n->napi_id); 1443 if (IS_ERR(n->thread)) { 1444 err = PTR_ERR(n->thread); 1445 pr_err("kthread_run failed with err %d\n", err); 1446 n->thread = NULL; 1447 } 1448 1449 return err; 1450 } 1451 1452 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1453 { 1454 const struct net_device_ops *ops = dev->netdev_ops; 1455 int ret; 1456 1457 ASSERT_RTNL(); 1458 1459 if (!netif_device_present(dev)) { 1460 /* may be detached because parent is runtime-suspended */ 1461 if (dev->dev.parent) 1462 pm_runtime_resume(dev->dev.parent); 1463 if (!netif_device_present(dev)) 1464 return -ENODEV; 1465 } 1466 1467 /* Block netpoll from trying to do any rx path servicing. 1468 * If we don't do this there is a chance ndo_poll_controller 1469 * or ndo_poll may be running while we open the device 1470 */ 1471 netpoll_poll_disable(dev); 1472 1473 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1474 ret = notifier_to_errno(ret); 1475 if (ret) 1476 return ret; 1477 1478 set_bit(__LINK_STATE_START, &dev->state); 1479 1480 if (ops->ndo_validate_addr) 1481 ret = ops->ndo_validate_addr(dev); 1482 1483 if (!ret && ops->ndo_open) 1484 ret = ops->ndo_open(dev); 1485 1486 netpoll_poll_enable(dev); 1487 1488 if (ret) 1489 clear_bit(__LINK_STATE_START, &dev->state); 1490 else { 1491 dev->flags |= IFF_UP; 1492 dev_set_rx_mode(dev); 1493 dev_activate(dev); 1494 add_device_randomness(dev->dev_addr, dev->addr_len); 1495 } 1496 1497 return ret; 1498 } 1499 1500 /** 1501 * dev_open - prepare an interface for use. 1502 * @dev: device to open 1503 * @extack: netlink extended ack 1504 * 1505 * Takes a device from down to up state. The device's private open 1506 * function is invoked and then the multicast lists are loaded. Finally 1507 * the device is moved into the up state and a %NETDEV_UP message is 1508 * sent to the netdev notifier chain. 1509 * 1510 * Calling this function on an active interface is a nop. On a failure 1511 * a negative errno code is returned. 1512 */ 1513 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1514 { 1515 int ret; 1516 1517 if (dev->flags & IFF_UP) 1518 return 0; 1519 1520 ret = __dev_open(dev, extack); 1521 if (ret < 0) 1522 return ret; 1523 1524 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1525 call_netdevice_notifiers(NETDEV_UP, dev); 1526 1527 return ret; 1528 } 1529 EXPORT_SYMBOL(dev_open); 1530 1531 static void __dev_close_many(struct list_head *head) 1532 { 1533 struct net_device *dev; 1534 1535 ASSERT_RTNL(); 1536 might_sleep(); 1537 1538 list_for_each_entry(dev, head, close_list) { 1539 /* Temporarily disable netpoll until the interface is down */ 1540 netpoll_poll_disable(dev); 1541 1542 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1543 1544 clear_bit(__LINK_STATE_START, &dev->state); 1545 1546 /* Synchronize to scheduled poll. We cannot touch poll list, it 1547 * can be even on different cpu. So just clear netif_running(). 1548 * 1549 * dev->stop() will invoke napi_disable() on all of it's 1550 * napi_struct instances on this device. 1551 */ 1552 smp_mb__after_atomic(); /* Commit netif_running(). */ 1553 } 1554 1555 dev_deactivate_many(head); 1556 1557 list_for_each_entry(dev, head, close_list) { 1558 const struct net_device_ops *ops = dev->netdev_ops; 1559 1560 /* 1561 * Call the device specific close. This cannot fail. 1562 * Only if device is UP 1563 * 1564 * We allow it to be called even after a DETACH hot-plug 1565 * event. 1566 */ 1567 if (ops->ndo_stop) 1568 ops->ndo_stop(dev); 1569 1570 dev->flags &= ~IFF_UP; 1571 netpoll_poll_enable(dev); 1572 } 1573 } 1574 1575 static void __dev_close(struct net_device *dev) 1576 { 1577 LIST_HEAD(single); 1578 1579 list_add(&dev->close_list, &single); 1580 __dev_close_many(&single); 1581 list_del(&single); 1582 } 1583 1584 void dev_close_many(struct list_head *head, bool unlink) 1585 { 1586 struct net_device *dev, *tmp; 1587 1588 /* Remove the devices that don't need to be closed */ 1589 list_for_each_entry_safe(dev, tmp, head, close_list) 1590 if (!(dev->flags & IFF_UP)) 1591 list_del_init(&dev->close_list); 1592 1593 __dev_close_many(head); 1594 1595 list_for_each_entry_safe(dev, tmp, head, close_list) { 1596 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1597 call_netdevice_notifiers(NETDEV_DOWN, dev); 1598 if (unlink) 1599 list_del_init(&dev->close_list); 1600 } 1601 } 1602 EXPORT_SYMBOL(dev_close_many); 1603 1604 /** 1605 * dev_close - shutdown an interface. 1606 * @dev: device to shutdown 1607 * 1608 * This function moves an active device into down state. A 1609 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1610 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1611 * chain. 1612 */ 1613 void dev_close(struct net_device *dev) 1614 { 1615 if (dev->flags & IFF_UP) { 1616 LIST_HEAD(single); 1617 1618 list_add(&dev->close_list, &single); 1619 dev_close_many(&single, true); 1620 list_del(&single); 1621 } 1622 } 1623 EXPORT_SYMBOL(dev_close); 1624 1625 1626 /** 1627 * dev_disable_lro - disable Large Receive Offload on a device 1628 * @dev: device 1629 * 1630 * Disable Large Receive Offload (LRO) on a net device. Must be 1631 * called under RTNL. This is needed if received packets may be 1632 * forwarded to another interface. 1633 */ 1634 void dev_disable_lro(struct net_device *dev) 1635 { 1636 struct net_device *lower_dev; 1637 struct list_head *iter; 1638 1639 dev->wanted_features &= ~NETIF_F_LRO; 1640 netdev_update_features(dev); 1641 1642 if (unlikely(dev->features & NETIF_F_LRO)) 1643 netdev_WARN(dev, "failed to disable LRO!\n"); 1644 1645 netdev_for_each_lower_dev(dev, lower_dev, iter) 1646 dev_disable_lro(lower_dev); 1647 } 1648 EXPORT_SYMBOL(dev_disable_lro); 1649 1650 /** 1651 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1652 * @dev: device 1653 * 1654 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1655 * called under RTNL. This is needed if Generic XDP is installed on 1656 * the device. 1657 */ 1658 static void dev_disable_gro_hw(struct net_device *dev) 1659 { 1660 dev->wanted_features &= ~NETIF_F_GRO_HW; 1661 netdev_update_features(dev); 1662 1663 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1664 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1665 } 1666 1667 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1668 { 1669 #define N(val) \ 1670 case NETDEV_##val: \ 1671 return "NETDEV_" __stringify(val); 1672 switch (cmd) { 1673 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1674 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1675 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1676 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) 1677 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) 1678 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) 1679 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1680 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1681 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1682 N(PRE_CHANGEADDR) 1683 } 1684 #undef N 1685 return "UNKNOWN_NETDEV_EVENT"; 1686 } 1687 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1688 1689 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1690 struct net_device *dev) 1691 { 1692 struct netdev_notifier_info info = { 1693 .dev = dev, 1694 }; 1695 1696 return nb->notifier_call(nb, val, &info); 1697 } 1698 1699 static int call_netdevice_register_notifiers(struct notifier_block *nb, 1700 struct net_device *dev) 1701 { 1702 int err; 1703 1704 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1705 err = notifier_to_errno(err); 1706 if (err) 1707 return err; 1708 1709 if (!(dev->flags & IFF_UP)) 1710 return 0; 1711 1712 call_netdevice_notifier(nb, NETDEV_UP, dev); 1713 return 0; 1714 } 1715 1716 static void call_netdevice_unregister_notifiers(struct notifier_block *nb, 1717 struct net_device *dev) 1718 { 1719 if (dev->flags & IFF_UP) { 1720 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1721 dev); 1722 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1723 } 1724 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1725 } 1726 1727 static int call_netdevice_register_net_notifiers(struct notifier_block *nb, 1728 struct net *net) 1729 { 1730 struct net_device *dev; 1731 int err; 1732 1733 for_each_netdev(net, dev) { 1734 err = call_netdevice_register_notifiers(nb, dev); 1735 if (err) 1736 goto rollback; 1737 } 1738 return 0; 1739 1740 rollback: 1741 for_each_netdev_continue_reverse(net, dev) 1742 call_netdevice_unregister_notifiers(nb, dev); 1743 return err; 1744 } 1745 1746 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb, 1747 struct net *net) 1748 { 1749 struct net_device *dev; 1750 1751 for_each_netdev(net, dev) 1752 call_netdevice_unregister_notifiers(nb, dev); 1753 } 1754 1755 static int dev_boot_phase = 1; 1756 1757 /** 1758 * register_netdevice_notifier - register a network notifier block 1759 * @nb: notifier 1760 * 1761 * Register a notifier to be called when network device events occur. 1762 * The notifier passed is linked into the kernel structures and must 1763 * not be reused until it has been unregistered. A negative errno code 1764 * is returned on a failure. 1765 * 1766 * When registered all registration and up events are replayed 1767 * to the new notifier to allow device to have a race free 1768 * view of the network device list. 1769 */ 1770 1771 int register_netdevice_notifier(struct notifier_block *nb) 1772 { 1773 struct net *net; 1774 int err; 1775 1776 /* Close race with setup_net() and cleanup_net() */ 1777 down_write(&pernet_ops_rwsem); 1778 rtnl_lock(); 1779 err = raw_notifier_chain_register(&netdev_chain, nb); 1780 if (err) 1781 goto unlock; 1782 if (dev_boot_phase) 1783 goto unlock; 1784 for_each_net(net) { 1785 err = call_netdevice_register_net_notifiers(nb, net); 1786 if (err) 1787 goto rollback; 1788 } 1789 1790 unlock: 1791 rtnl_unlock(); 1792 up_write(&pernet_ops_rwsem); 1793 return err; 1794 1795 rollback: 1796 for_each_net_continue_reverse(net) 1797 call_netdevice_unregister_net_notifiers(nb, net); 1798 1799 raw_notifier_chain_unregister(&netdev_chain, nb); 1800 goto unlock; 1801 } 1802 EXPORT_SYMBOL(register_netdevice_notifier); 1803 1804 /** 1805 * unregister_netdevice_notifier - unregister a network notifier block 1806 * @nb: notifier 1807 * 1808 * Unregister a notifier previously registered by 1809 * register_netdevice_notifier(). The notifier is unlinked into the 1810 * kernel structures and may then be reused. A negative errno code 1811 * is returned on a failure. 1812 * 1813 * After unregistering unregister and down device events are synthesized 1814 * for all devices on the device list to the removed notifier to remove 1815 * the need for special case cleanup code. 1816 */ 1817 1818 int unregister_netdevice_notifier(struct notifier_block *nb) 1819 { 1820 struct net *net; 1821 int err; 1822 1823 /* Close race with setup_net() and cleanup_net() */ 1824 down_write(&pernet_ops_rwsem); 1825 rtnl_lock(); 1826 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1827 if (err) 1828 goto unlock; 1829 1830 for_each_net(net) 1831 call_netdevice_unregister_net_notifiers(nb, net); 1832 1833 unlock: 1834 rtnl_unlock(); 1835 up_write(&pernet_ops_rwsem); 1836 return err; 1837 } 1838 EXPORT_SYMBOL(unregister_netdevice_notifier); 1839 1840 static int __register_netdevice_notifier_net(struct net *net, 1841 struct notifier_block *nb, 1842 bool ignore_call_fail) 1843 { 1844 int err; 1845 1846 err = raw_notifier_chain_register(&net->netdev_chain, nb); 1847 if (err) 1848 return err; 1849 if (dev_boot_phase) 1850 return 0; 1851 1852 err = call_netdevice_register_net_notifiers(nb, net); 1853 if (err && !ignore_call_fail) 1854 goto chain_unregister; 1855 1856 return 0; 1857 1858 chain_unregister: 1859 raw_notifier_chain_unregister(&net->netdev_chain, nb); 1860 return err; 1861 } 1862 1863 static int __unregister_netdevice_notifier_net(struct net *net, 1864 struct notifier_block *nb) 1865 { 1866 int err; 1867 1868 err = raw_notifier_chain_unregister(&net->netdev_chain, nb); 1869 if (err) 1870 return err; 1871 1872 call_netdevice_unregister_net_notifiers(nb, net); 1873 return 0; 1874 } 1875 1876 /** 1877 * register_netdevice_notifier_net - register a per-netns network notifier block 1878 * @net: network namespace 1879 * @nb: notifier 1880 * 1881 * Register a notifier to be called when network device events occur. 1882 * The notifier passed is linked into the kernel structures and must 1883 * not be reused until it has been unregistered. A negative errno code 1884 * is returned on a failure. 1885 * 1886 * When registered all registration and up events are replayed 1887 * to the new notifier to allow device to have a race free 1888 * view of the network device list. 1889 */ 1890 1891 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) 1892 { 1893 int err; 1894 1895 rtnl_lock(); 1896 err = __register_netdevice_notifier_net(net, nb, false); 1897 rtnl_unlock(); 1898 return err; 1899 } 1900 EXPORT_SYMBOL(register_netdevice_notifier_net); 1901 1902 /** 1903 * unregister_netdevice_notifier_net - unregister a per-netns 1904 * network notifier block 1905 * @net: network namespace 1906 * @nb: notifier 1907 * 1908 * Unregister a notifier previously registered by 1909 * register_netdevice_notifier(). The notifier is unlinked into the 1910 * kernel structures and may then be reused. A negative errno code 1911 * is returned on a failure. 1912 * 1913 * After unregistering unregister and down device events are synthesized 1914 * for all devices on the device list to the removed notifier to remove 1915 * the need for special case cleanup code. 1916 */ 1917 1918 int unregister_netdevice_notifier_net(struct net *net, 1919 struct notifier_block *nb) 1920 { 1921 int err; 1922 1923 rtnl_lock(); 1924 err = __unregister_netdevice_notifier_net(net, nb); 1925 rtnl_unlock(); 1926 return err; 1927 } 1928 EXPORT_SYMBOL(unregister_netdevice_notifier_net); 1929 1930 int register_netdevice_notifier_dev_net(struct net_device *dev, 1931 struct notifier_block *nb, 1932 struct netdev_net_notifier *nn) 1933 { 1934 int err; 1935 1936 rtnl_lock(); 1937 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); 1938 if (!err) { 1939 nn->nb = nb; 1940 list_add(&nn->list, &dev->net_notifier_list); 1941 } 1942 rtnl_unlock(); 1943 return err; 1944 } 1945 EXPORT_SYMBOL(register_netdevice_notifier_dev_net); 1946 1947 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 1948 struct notifier_block *nb, 1949 struct netdev_net_notifier *nn) 1950 { 1951 int err; 1952 1953 rtnl_lock(); 1954 list_del(&nn->list); 1955 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); 1956 rtnl_unlock(); 1957 return err; 1958 } 1959 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); 1960 1961 static void move_netdevice_notifiers_dev_net(struct net_device *dev, 1962 struct net *net) 1963 { 1964 struct netdev_net_notifier *nn; 1965 1966 list_for_each_entry(nn, &dev->net_notifier_list, list) { 1967 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb); 1968 __register_netdevice_notifier_net(net, nn->nb, true); 1969 } 1970 } 1971 1972 /** 1973 * call_netdevice_notifiers_info - call all network notifier blocks 1974 * @val: value passed unmodified to notifier function 1975 * @info: notifier information data 1976 * 1977 * Call all network notifier blocks. Parameters and return value 1978 * are as for raw_notifier_call_chain(). 1979 */ 1980 1981 static int call_netdevice_notifiers_info(unsigned long val, 1982 struct netdev_notifier_info *info) 1983 { 1984 struct net *net = dev_net(info->dev); 1985 int ret; 1986 1987 ASSERT_RTNL(); 1988 1989 /* Run per-netns notifier block chain first, then run the global one. 1990 * Hopefully, one day, the global one is going to be removed after 1991 * all notifier block registrators get converted to be per-netns. 1992 */ 1993 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1994 if (ret & NOTIFY_STOP_MASK) 1995 return ret; 1996 return raw_notifier_call_chain(&netdev_chain, val, info); 1997 } 1998 1999 static int call_netdevice_notifiers_extack(unsigned long val, 2000 struct net_device *dev, 2001 struct netlink_ext_ack *extack) 2002 { 2003 struct netdev_notifier_info info = { 2004 .dev = dev, 2005 .extack = extack, 2006 }; 2007 2008 return call_netdevice_notifiers_info(val, &info); 2009 } 2010 2011 /** 2012 * call_netdevice_notifiers - call all network notifier blocks 2013 * @val: value passed unmodified to notifier function 2014 * @dev: net_device pointer passed unmodified to notifier function 2015 * 2016 * Call all network notifier blocks. Parameters and return value 2017 * are as for raw_notifier_call_chain(). 2018 */ 2019 2020 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 2021 { 2022 return call_netdevice_notifiers_extack(val, dev, NULL); 2023 } 2024 EXPORT_SYMBOL(call_netdevice_notifiers); 2025 2026 /** 2027 * call_netdevice_notifiers_mtu - call all network notifier blocks 2028 * @val: value passed unmodified to notifier function 2029 * @dev: net_device pointer passed unmodified to notifier function 2030 * @arg: additional u32 argument passed to the notifier function 2031 * 2032 * Call all network notifier blocks. Parameters and return value 2033 * are as for raw_notifier_call_chain(). 2034 */ 2035 static int call_netdevice_notifiers_mtu(unsigned long val, 2036 struct net_device *dev, u32 arg) 2037 { 2038 struct netdev_notifier_info_ext info = { 2039 .info.dev = dev, 2040 .ext.mtu = arg, 2041 }; 2042 2043 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 2044 2045 return call_netdevice_notifiers_info(val, &info.info); 2046 } 2047 2048 #ifdef CONFIG_NET_INGRESS 2049 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2050 2051 void net_inc_ingress_queue(void) 2052 { 2053 static_branch_inc(&ingress_needed_key); 2054 } 2055 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2056 2057 void net_dec_ingress_queue(void) 2058 { 2059 static_branch_dec(&ingress_needed_key); 2060 } 2061 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2062 #endif 2063 2064 #ifdef CONFIG_NET_EGRESS 2065 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2066 2067 void net_inc_egress_queue(void) 2068 { 2069 static_branch_inc(&egress_needed_key); 2070 } 2071 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2072 2073 void net_dec_egress_queue(void) 2074 { 2075 static_branch_dec(&egress_needed_key); 2076 } 2077 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2078 #endif 2079 2080 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2081 #ifdef CONFIG_JUMP_LABEL 2082 static atomic_t netstamp_needed_deferred; 2083 static atomic_t netstamp_wanted; 2084 static void netstamp_clear(struct work_struct *work) 2085 { 2086 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2087 int wanted; 2088 2089 wanted = atomic_add_return(deferred, &netstamp_wanted); 2090 if (wanted > 0) 2091 static_branch_enable(&netstamp_needed_key); 2092 else 2093 static_branch_disable(&netstamp_needed_key); 2094 } 2095 static DECLARE_WORK(netstamp_work, netstamp_clear); 2096 #endif 2097 2098 void net_enable_timestamp(void) 2099 { 2100 #ifdef CONFIG_JUMP_LABEL 2101 int wanted; 2102 2103 while (1) { 2104 wanted = atomic_read(&netstamp_wanted); 2105 if (wanted <= 0) 2106 break; 2107 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 2108 return; 2109 } 2110 atomic_inc(&netstamp_needed_deferred); 2111 schedule_work(&netstamp_work); 2112 #else 2113 static_branch_inc(&netstamp_needed_key); 2114 #endif 2115 } 2116 EXPORT_SYMBOL(net_enable_timestamp); 2117 2118 void net_disable_timestamp(void) 2119 { 2120 #ifdef CONFIG_JUMP_LABEL 2121 int wanted; 2122 2123 while (1) { 2124 wanted = atomic_read(&netstamp_wanted); 2125 if (wanted <= 1) 2126 break; 2127 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 2128 return; 2129 } 2130 atomic_dec(&netstamp_needed_deferred); 2131 schedule_work(&netstamp_work); 2132 #else 2133 static_branch_dec(&netstamp_needed_key); 2134 #endif 2135 } 2136 EXPORT_SYMBOL(net_disable_timestamp); 2137 2138 static inline void net_timestamp_set(struct sk_buff *skb) 2139 { 2140 skb->tstamp = 0; 2141 if (static_branch_unlikely(&netstamp_needed_key)) 2142 __net_timestamp(skb); 2143 } 2144 2145 #define net_timestamp_check(COND, SKB) \ 2146 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2147 if ((COND) && !(SKB)->tstamp) \ 2148 __net_timestamp(SKB); \ 2149 } \ 2150 2151 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2152 { 2153 return __is_skb_forwardable(dev, skb, true); 2154 } 2155 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2156 2157 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, 2158 bool check_mtu) 2159 { 2160 int ret = ____dev_forward_skb(dev, skb, check_mtu); 2161 2162 if (likely(!ret)) { 2163 skb->protocol = eth_type_trans(skb, dev); 2164 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2165 } 2166 2167 return ret; 2168 } 2169 2170 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2171 { 2172 return __dev_forward_skb2(dev, skb, true); 2173 } 2174 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2175 2176 /** 2177 * dev_forward_skb - loopback an skb to another netif 2178 * 2179 * @dev: destination network device 2180 * @skb: buffer to forward 2181 * 2182 * return values: 2183 * NET_RX_SUCCESS (no congestion) 2184 * NET_RX_DROP (packet was dropped, but freed) 2185 * 2186 * dev_forward_skb can be used for injecting an skb from the 2187 * start_xmit function of one device into the receive queue 2188 * of another device. 2189 * 2190 * The receiving device may be in another namespace, so 2191 * we have to clear all information in the skb that could 2192 * impact namespace isolation. 2193 */ 2194 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2195 { 2196 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2197 } 2198 EXPORT_SYMBOL_GPL(dev_forward_skb); 2199 2200 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) 2201 { 2202 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); 2203 } 2204 2205 static inline int deliver_skb(struct sk_buff *skb, 2206 struct packet_type *pt_prev, 2207 struct net_device *orig_dev) 2208 { 2209 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2210 return -ENOMEM; 2211 refcount_inc(&skb->users); 2212 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2213 } 2214 2215 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2216 struct packet_type **pt, 2217 struct net_device *orig_dev, 2218 __be16 type, 2219 struct list_head *ptype_list) 2220 { 2221 struct packet_type *ptype, *pt_prev = *pt; 2222 2223 list_for_each_entry_rcu(ptype, ptype_list, list) { 2224 if (ptype->type != type) 2225 continue; 2226 if (pt_prev) 2227 deliver_skb(skb, pt_prev, orig_dev); 2228 pt_prev = ptype; 2229 } 2230 *pt = pt_prev; 2231 } 2232 2233 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2234 { 2235 if (!ptype->af_packet_priv || !skb->sk) 2236 return false; 2237 2238 if (ptype->id_match) 2239 return ptype->id_match(ptype, skb->sk); 2240 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2241 return true; 2242 2243 return false; 2244 } 2245 2246 /** 2247 * dev_nit_active - return true if any network interface taps are in use 2248 * 2249 * @dev: network device to check for the presence of taps 2250 */ 2251 bool dev_nit_active(struct net_device *dev) 2252 { 2253 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2254 } 2255 EXPORT_SYMBOL_GPL(dev_nit_active); 2256 2257 /* 2258 * Support routine. Sends outgoing frames to any network 2259 * taps currently in use. 2260 */ 2261 2262 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2263 { 2264 struct packet_type *ptype; 2265 struct sk_buff *skb2 = NULL; 2266 struct packet_type *pt_prev = NULL; 2267 struct list_head *ptype_list = &ptype_all; 2268 2269 rcu_read_lock(); 2270 again: 2271 list_for_each_entry_rcu(ptype, ptype_list, list) { 2272 if (ptype->ignore_outgoing) 2273 continue; 2274 2275 /* Never send packets back to the socket 2276 * they originated from - MvS (miquels@drinkel.ow.org) 2277 */ 2278 if (skb_loop_sk(ptype, skb)) 2279 continue; 2280 2281 if (pt_prev) { 2282 deliver_skb(skb2, pt_prev, skb->dev); 2283 pt_prev = ptype; 2284 continue; 2285 } 2286 2287 /* need to clone skb, done only once */ 2288 skb2 = skb_clone(skb, GFP_ATOMIC); 2289 if (!skb2) 2290 goto out_unlock; 2291 2292 net_timestamp_set(skb2); 2293 2294 /* skb->nh should be correctly 2295 * set by sender, so that the second statement is 2296 * just protection against buggy protocols. 2297 */ 2298 skb_reset_mac_header(skb2); 2299 2300 if (skb_network_header(skb2) < skb2->data || 2301 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2302 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2303 ntohs(skb2->protocol), 2304 dev->name); 2305 skb_reset_network_header(skb2); 2306 } 2307 2308 skb2->transport_header = skb2->network_header; 2309 skb2->pkt_type = PACKET_OUTGOING; 2310 pt_prev = ptype; 2311 } 2312 2313 if (ptype_list == &ptype_all) { 2314 ptype_list = &dev->ptype_all; 2315 goto again; 2316 } 2317 out_unlock: 2318 if (pt_prev) { 2319 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2320 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2321 else 2322 kfree_skb(skb2); 2323 } 2324 rcu_read_unlock(); 2325 } 2326 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2327 2328 /** 2329 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2330 * @dev: Network device 2331 * @txq: number of queues available 2332 * 2333 * If real_num_tx_queues is changed the tc mappings may no longer be 2334 * valid. To resolve this verify the tc mapping remains valid and if 2335 * not NULL the mapping. With no priorities mapping to this 2336 * offset/count pair it will no longer be used. In the worst case TC0 2337 * is invalid nothing can be done so disable priority mappings. If is 2338 * expected that drivers will fix this mapping if they can before 2339 * calling netif_set_real_num_tx_queues. 2340 */ 2341 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2342 { 2343 int i; 2344 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2345 2346 /* If TC0 is invalidated disable TC mapping */ 2347 if (tc->offset + tc->count > txq) { 2348 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2349 dev->num_tc = 0; 2350 return; 2351 } 2352 2353 /* Invalidated prio to tc mappings set to TC0 */ 2354 for (i = 1; i < TC_BITMASK + 1; i++) { 2355 int q = netdev_get_prio_tc_map(dev, i); 2356 2357 tc = &dev->tc_to_txq[q]; 2358 if (tc->offset + tc->count > txq) { 2359 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", 2360 i, q); 2361 netdev_set_prio_tc_map(dev, i, 0); 2362 } 2363 } 2364 } 2365 2366 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2367 { 2368 if (dev->num_tc) { 2369 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2370 int i; 2371 2372 /* walk through the TCs and see if it falls into any of them */ 2373 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2374 if ((txq - tc->offset) < tc->count) 2375 return i; 2376 } 2377 2378 /* didn't find it, just return -1 to indicate no match */ 2379 return -1; 2380 } 2381 2382 return 0; 2383 } 2384 EXPORT_SYMBOL(netdev_txq_to_tc); 2385 2386 #ifdef CONFIG_XPS 2387 static struct static_key xps_needed __read_mostly; 2388 static struct static_key xps_rxqs_needed __read_mostly; 2389 static DEFINE_MUTEX(xps_map_mutex); 2390 #define xmap_dereference(P) \ 2391 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2392 2393 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2394 struct xps_dev_maps *old_maps, int tci, u16 index) 2395 { 2396 struct xps_map *map = NULL; 2397 int pos; 2398 2399 if (dev_maps) 2400 map = xmap_dereference(dev_maps->attr_map[tci]); 2401 if (!map) 2402 return false; 2403 2404 for (pos = map->len; pos--;) { 2405 if (map->queues[pos] != index) 2406 continue; 2407 2408 if (map->len > 1) { 2409 map->queues[pos] = map->queues[--map->len]; 2410 break; 2411 } 2412 2413 if (old_maps) 2414 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); 2415 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2416 kfree_rcu(map, rcu); 2417 return false; 2418 } 2419 2420 return true; 2421 } 2422 2423 static bool remove_xps_queue_cpu(struct net_device *dev, 2424 struct xps_dev_maps *dev_maps, 2425 int cpu, u16 offset, u16 count) 2426 { 2427 int num_tc = dev_maps->num_tc; 2428 bool active = false; 2429 int tci; 2430 2431 for (tci = cpu * num_tc; num_tc--; tci++) { 2432 int i, j; 2433 2434 for (i = count, j = offset; i--; j++) { 2435 if (!remove_xps_queue(dev_maps, NULL, tci, j)) 2436 break; 2437 } 2438 2439 active |= i < 0; 2440 } 2441 2442 return active; 2443 } 2444 2445 static void reset_xps_maps(struct net_device *dev, 2446 struct xps_dev_maps *dev_maps, 2447 enum xps_map_type type) 2448 { 2449 static_key_slow_dec_cpuslocked(&xps_needed); 2450 if (type == XPS_RXQS) 2451 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2452 2453 RCU_INIT_POINTER(dev->xps_maps[type], NULL); 2454 2455 kfree_rcu(dev_maps, rcu); 2456 } 2457 2458 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, 2459 u16 offset, u16 count) 2460 { 2461 struct xps_dev_maps *dev_maps; 2462 bool active = false; 2463 int i, j; 2464 2465 dev_maps = xmap_dereference(dev->xps_maps[type]); 2466 if (!dev_maps) 2467 return; 2468 2469 for (j = 0; j < dev_maps->nr_ids; j++) 2470 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); 2471 if (!active) 2472 reset_xps_maps(dev, dev_maps, type); 2473 2474 if (type == XPS_CPUS) { 2475 for (i = offset + (count - 1); count--; i--) 2476 netdev_queue_numa_node_write( 2477 netdev_get_tx_queue(dev, i), NUMA_NO_NODE); 2478 } 2479 } 2480 2481 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2482 u16 count) 2483 { 2484 if (!static_key_false(&xps_needed)) 2485 return; 2486 2487 cpus_read_lock(); 2488 mutex_lock(&xps_map_mutex); 2489 2490 if (static_key_false(&xps_rxqs_needed)) 2491 clean_xps_maps(dev, XPS_RXQS, offset, count); 2492 2493 clean_xps_maps(dev, XPS_CPUS, offset, count); 2494 2495 mutex_unlock(&xps_map_mutex); 2496 cpus_read_unlock(); 2497 } 2498 2499 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2500 { 2501 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2502 } 2503 2504 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2505 u16 index, bool is_rxqs_map) 2506 { 2507 struct xps_map *new_map; 2508 int alloc_len = XPS_MIN_MAP_ALLOC; 2509 int i, pos; 2510 2511 for (pos = 0; map && pos < map->len; pos++) { 2512 if (map->queues[pos] != index) 2513 continue; 2514 return map; 2515 } 2516 2517 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2518 if (map) { 2519 if (pos < map->alloc_len) 2520 return map; 2521 2522 alloc_len = map->alloc_len * 2; 2523 } 2524 2525 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2526 * map 2527 */ 2528 if (is_rxqs_map) 2529 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2530 else 2531 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2532 cpu_to_node(attr_index)); 2533 if (!new_map) 2534 return NULL; 2535 2536 for (i = 0; i < pos; i++) 2537 new_map->queues[i] = map->queues[i]; 2538 new_map->alloc_len = alloc_len; 2539 new_map->len = pos; 2540 2541 return new_map; 2542 } 2543 2544 /* Copy xps maps at a given index */ 2545 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, 2546 struct xps_dev_maps *new_dev_maps, int index, 2547 int tc, bool skip_tc) 2548 { 2549 int i, tci = index * dev_maps->num_tc; 2550 struct xps_map *map; 2551 2552 /* copy maps belonging to foreign traffic classes */ 2553 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2554 if (i == tc && skip_tc) 2555 continue; 2556 2557 /* fill in the new device map from the old device map */ 2558 map = xmap_dereference(dev_maps->attr_map[tci]); 2559 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2560 } 2561 } 2562 2563 /* Must be called under cpus_read_lock */ 2564 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2565 u16 index, enum xps_map_type type) 2566 { 2567 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; 2568 const unsigned long *online_mask = NULL; 2569 bool active = false, copy = false; 2570 int i, j, tci, numa_node_id = -2; 2571 int maps_sz, num_tc = 1, tc = 0; 2572 struct xps_map *map, *new_map; 2573 unsigned int nr_ids; 2574 2575 if (dev->num_tc) { 2576 /* Do not allow XPS on subordinate device directly */ 2577 num_tc = dev->num_tc; 2578 if (num_tc < 0) 2579 return -EINVAL; 2580 2581 /* If queue belongs to subordinate dev use its map */ 2582 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2583 2584 tc = netdev_txq_to_tc(dev, index); 2585 if (tc < 0) 2586 return -EINVAL; 2587 } 2588 2589 mutex_lock(&xps_map_mutex); 2590 2591 dev_maps = xmap_dereference(dev->xps_maps[type]); 2592 if (type == XPS_RXQS) { 2593 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2594 nr_ids = dev->num_rx_queues; 2595 } else { 2596 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2597 if (num_possible_cpus() > 1) 2598 online_mask = cpumask_bits(cpu_online_mask); 2599 nr_ids = nr_cpu_ids; 2600 } 2601 2602 if (maps_sz < L1_CACHE_BYTES) 2603 maps_sz = L1_CACHE_BYTES; 2604 2605 /* The old dev_maps could be larger or smaller than the one we're 2606 * setting up now, as dev->num_tc or nr_ids could have been updated in 2607 * between. We could try to be smart, but let's be safe instead and only 2608 * copy foreign traffic classes if the two map sizes match. 2609 */ 2610 if (dev_maps && 2611 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) 2612 copy = true; 2613 2614 /* allocate memory for queue storage */ 2615 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2616 j < nr_ids;) { 2617 if (!new_dev_maps) { 2618 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2619 if (!new_dev_maps) { 2620 mutex_unlock(&xps_map_mutex); 2621 return -ENOMEM; 2622 } 2623 2624 new_dev_maps->nr_ids = nr_ids; 2625 new_dev_maps->num_tc = num_tc; 2626 } 2627 2628 tci = j * num_tc + tc; 2629 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; 2630 2631 map = expand_xps_map(map, j, index, type == XPS_RXQS); 2632 if (!map) 2633 goto error; 2634 2635 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2636 } 2637 2638 if (!new_dev_maps) 2639 goto out_no_new_maps; 2640 2641 if (!dev_maps) { 2642 /* Increment static keys at most once per type */ 2643 static_key_slow_inc_cpuslocked(&xps_needed); 2644 if (type == XPS_RXQS) 2645 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2646 } 2647 2648 for (j = 0; j < nr_ids; j++) { 2649 bool skip_tc = false; 2650 2651 tci = j * num_tc + tc; 2652 if (netif_attr_test_mask(j, mask, nr_ids) && 2653 netif_attr_test_online(j, online_mask, nr_ids)) { 2654 /* add tx-queue to CPU/rx-queue maps */ 2655 int pos = 0; 2656 2657 skip_tc = true; 2658 2659 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2660 while ((pos < map->len) && (map->queues[pos] != index)) 2661 pos++; 2662 2663 if (pos == map->len) 2664 map->queues[map->len++] = index; 2665 #ifdef CONFIG_NUMA 2666 if (type == XPS_CPUS) { 2667 if (numa_node_id == -2) 2668 numa_node_id = cpu_to_node(j); 2669 else if (numa_node_id != cpu_to_node(j)) 2670 numa_node_id = -1; 2671 } 2672 #endif 2673 } 2674 2675 if (copy) 2676 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, 2677 skip_tc); 2678 } 2679 2680 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); 2681 2682 /* Cleanup old maps */ 2683 if (!dev_maps) 2684 goto out_no_old_maps; 2685 2686 for (j = 0; j < dev_maps->nr_ids; j++) { 2687 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { 2688 map = xmap_dereference(dev_maps->attr_map[tci]); 2689 if (!map) 2690 continue; 2691 2692 if (copy) { 2693 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2694 if (map == new_map) 2695 continue; 2696 } 2697 2698 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2699 kfree_rcu(map, rcu); 2700 } 2701 } 2702 2703 old_dev_maps = dev_maps; 2704 2705 out_no_old_maps: 2706 dev_maps = new_dev_maps; 2707 active = true; 2708 2709 out_no_new_maps: 2710 if (type == XPS_CPUS) 2711 /* update Tx queue numa node */ 2712 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2713 (numa_node_id >= 0) ? 2714 numa_node_id : NUMA_NO_NODE); 2715 2716 if (!dev_maps) 2717 goto out_no_maps; 2718 2719 /* removes tx-queue from unused CPUs/rx-queues */ 2720 for (j = 0; j < dev_maps->nr_ids; j++) { 2721 tci = j * dev_maps->num_tc; 2722 2723 for (i = 0; i < dev_maps->num_tc; i++, tci++) { 2724 if (i == tc && 2725 netif_attr_test_mask(j, mask, dev_maps->nr_ids) && 2726 netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) 2727 continue; 2728 2729 active |= remove_xps_queue(dev_maps, 2730 copy ? old_dev_maps : NULL, 2731 tci, index); 2732 } 2733 } 2734 2735 if (old_dev_maps) 2736 kfree_rcu(old_dev_maps, rcu); 2737 2738 /* free map if not active */ 2739 if (!active) 2740 reset_xps_maps(dev, dev_maps, type); 2741 2742 out_no_maps: 2743 mutex_unlock(&xps_map_mutex); 2744 2745 return 0; 2746 error: 2747 /* remove any maps that we added */ 2748 for (j = 0; j < nr_ids; j++) { 2749 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2750 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2751 map = copy ? 2752 xmap_dereference(dev_maps->attr_map[tci]) : 2753 NULL; 2754 if (new_map && new_map != map) 2755 kfree(new_map); 2756 } 2757 } 2758 2759 mutex_unlock(&xps_map_mutex); 2760 2761 kfree(new_dev_maps); 2762 return -ENOMEM; 2763 } 2764 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2765 2766 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2767 u16 index) 2768 { 2769 int ret; 2770 2771 cpus_read_lock(); 2772 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); 2773 cpus_read_unlock(); 2774 2775 return ret; 2776 } 2777 EXPORT_SYMBOL(netif_set_xps_queue); 2778 2779 #endif 2780 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2781 { 2782 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2783 2784 /* Unbind any subordinate channels */ 2785 while (txq-- != &dev->_tx[0]) { 2786 if (txq->sb_dev) 2787 netdev_unbind_sb_channel(dev, txq->sb_dev); 2788 } 2789 } 2790 2791 void netdev_reset_tc(struct net_device *dev) 2792 { 2793 #ifdef CONFIG_XPS 2794 netif_reset_xps_queues_gt(dev, 0); 2795 #endif 2796 netdev_unbind_all_sb_channels(dev); 2797 2798 /* Reset TC configuration of device */ 2799 dev->num_tc = 0; 2800 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2801 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2802 } 2803 EXPORT_SYMBOL(netdev_reset_tc); 2804 2805 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2806 { 2807 if (tc >= dev->num_tc) 2808 return -EINVAL; 2809 2810 #ifdef CONFIG_XPS 2811 netif_reset_xps_queues(dev, offset, count); 2812 #endif 2813 dev->tc_to_txq[tc].count = count; 2814 dev->tc_to_txq[tc].offset = offset; 2815 return 0; 2816 } 2817 EXPORT_SYMBOL(netdev_set_tc_queue); 2818 2819 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2820 { 2821 if (num_tc > TC_MAX_QUEUE) 2822 return -EINVAL; 2823 2824 #ifdef CONFIG_XPS 2825 netif_reset_xps_queues_gt(dev, 0); 2826 #endif 2827 netdev_unbind_all_sb_channels(dev); 2828 2829 dev->num_tc = num_tc; 2830 return 0; 2831 } 2832 EXPORT_SYMBOL(netdev_set_num_tc); 2833 2834 void netdev_unbind_sb_channel(struct net_device *dev, 2835 struct net_device *sb_dev) 2836 { 2837 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2838 2839 #ifdef CONFIG_XPS 2840 netif_reset_xps_queues_gt(sb_dev, 0); 2841 #endif 2842 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2843 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2844 2845 while (txq-- != &dev->_tx[0]) { 2846 if (txq->sb_dev == sb_dev) 2847 txq->sb_dev = NULL; 2848 } 2849 } 2850 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2851 2852 int netdev_bind_sb_channel_queue(struct net_device *dev, 2853 struct net_device *sb_dev, 2854 u8 tc, u16 count, u16 offset) 2855 { 2856 /* Make certain the sb_dev and dev are already configured */ 2857 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2858 return -EINVAL; 2859 2860 /* We cannot hand out queues we don't have */ 2861 if ((offset + count) > dev->real_num_tx_queues) 2862 return -EINVAL; 2863 2864 /* Record the mapping */ 2865 sb_dev->tc_to_txq[tc].count = count; 2866 sb_dev->tc_to_txq[tc].offset = offset; 2867 2868 /* Provide a way for Tx queue to find the tc_to_txq map or 2869 * XPS map for itself. 2870 */ 2871 while (count--) 2872 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2873 2874 return 0; 2875 } 2876 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2877 2878 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2879 { 2880 /* Do not use a multiqueue device to represent a subordinate channel */ 2881 if (netif_is_multiqueue(dev)) 2882 return -ENODEV; 2883 2884 /* We allow channels 1 - 32767 to be used for subordinate channels. 2885 * Channel 0 is meant to be "native" mode and used only to represent 2886 * the main root device. We allow writing 0 to reset the device back 2887 * to normal mode after being used as a subordinate channel. 2888 */ 2889 if (channel > S16_MAX) 2890 return -EINVAL; 2891 2892 dev->num_tc = -channel; 2893 2894 return 0; 2895 } 2896 EXPORT_SYMBOL(netdev_set_sb_channel); 2897 2898 /* 2899 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2900 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed. 2901 */ 2902 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2903 { 2904 bool disabling; 2905 int rc; 2906 2907 disabling = txq < dev->real_num_tx_queues; 2908 2909 if (txq < 1 || txq > dev->num_tx_queues) 2910 return -EINVAL; 2911 2912 if (dev->reg_state == NETREG_REGISTERED || 2913 dev->reg_state == NETREG_UNREGISTERING) { 2914 ASSERT_RTNL(); 2915 2916 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2917 txq); 2918 if (rc) 2919 return rc; 2920 2921 if (dev->num_tc) 2922 netif_setup_tc(dev, txq); 2923 2924 dev->real_num_tx_queues = txq; 2925 2926 if (disabling) { 2927 synchronize_net(); 2928 qdisc_reset_all_tx_gt(dev, txq); 2929 #ifdef CONFIG_XPS 2930 netif_reset_xps_queues_gt(dev, txq); 2931 #endif 2932 } 2933 } else { 2934 dev->real_num_tx_queues = txq; 2935 } 2936 2937 return 0; 2938 } 2939 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2940 2941 #ifdef CONFIG_SYSFS 2942 /** 2943 * netif_set_real_num_rx_queues - set actual number of RX queues used 2944 * @dev: Network device 2945 * @rxq: Actual number of RX queues 2946 * 2947 * This must be called either with the rtnl_lock held or before 2948 * registration of the net device. Returns 0 on success, or a 2949 * negative error code. If called before registration, it always 2950 * succeeds. 2951 */ 2952 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2953 { 2954 int rc; 2955 2956 if (rxq < 1 || rxq > dev->num_rx_queues) 2957 return -EINVAL; 2958 2959 if (dev->reg_state == NETREG_REGISTERED) { 2960 ASSERT_RTNL(); 2961 2962 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2963 rxq); 2964 if (rc) 2965 return rc; 2966 } 2967 2968 dev->real_num_rx_queues = rxq; 2969 return 0; 2970 } 2971 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2972 #endif 2973 2974 /** 2975 * netif_set_real_num_queues - set actual number of RX and TX queues used 2976 * @dev: Network device 2977 * @txq: Actual number of TX queues 2978 * @rxq: Actual number of RX queues 2979 * 2980 * Set the real number of both TX and RX queues. 2981 * Does nothing if the number of queues is already correct. 2982 */ 2983 int netif_set_real_num_queues(struct net_device *dev, 2984 unsigned int txq, unsigned int rxq) 2985 { 2986 unsigned int old_rxq = dev->real_num_rx_queues; 2987 int err; 2988 2989 if (txq < 1 || txq > dev->num_tx_queues || 2990 rxq < 1 || rxq > dev->num_rx_queues) 2991 return -EINVAL; 2992 2993 /* Start from increases, so the error path only does decreases - 2994 * decreases can't fail. 2995 */ 2996 if (rxq > dev->real_num_rx_queues) { 2997 err = netif_set_real_num_rx_queues(dev, rxq); 2998 if (err) 2999 return err; 3000 } 3001 if (txq > dev->real_num_tx_queues) { 3002 err = netif_set_real_num_tx_queues(dev, txq); 3003 if (err) 3004 goto undo_rx; 3005 } 3006 if (rxq < dev->real_num_rx_queues) 3007 WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); 3008 if (txq < dev->real_num_tx_queues) 3009 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); 3010 3011 return 0; 3012 undo_rx: 3013 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); 3014 return err; 3015 } 3016 EXPORT_SYMBOL(netif_set_real_num_queues); 3017 3018 /** 3019 * netif_get_num_default_rss_queues - default number of RSS queues 3020 * 3021 * This routine should set an upper limit on the number of RSS queues 3022 * used by default by multiqueue devices. 3023 */ 3024 int netif_get_num_default_rss_queues(void) 3025 { 3026 return is_kdump_kernel() ? 3027 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 3028 } 3029 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 3030 3031 static void __netif_reschedule(struct Qdisc *q) 3032 { 3033 struct softnet_data *sd; 3034 unsigned long flags; 3035 3036 local_irq_save(flags); 3037 sd = this_cpu_ptr(&softnet_data); 3038 q->next_sched = NULL; 3039 *sd->output_queue_tailp = q; 3040 sd->output_queue_tailp = &q->next_sched; 3041 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3042 local_irq_restore(flags); 3043 } 3044 3045 void __netif_schedule(struct Qdisc *q) 3046 { 3047 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 3048 __netif_reschedule(q); 3049 } 3050 EXPORT_SYMBOL(__netif_schedule); 3051 3052 struct dev_kfree_skb_cb { 3053 enum skb_free_reason reason; 3054 }; 3055 3056 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 3057 { 3058 return (struct dev_kfree_skb_cb *)skb->cb; 3059 } 3060 3061 void netif_schedule_queue(struct netdev_queue *txq) 3062 { 3063 rcu_read_lock(); 3064 if (!netif_xmit_stopped(txq)) { 3065 struct Qdisc *q = rcu_dereference(txq->qdisc); 3066 3067 __netif_schedule(q); 3068 } 3069 rcu_read_unlock(); 3070 } 3071 EXPORT_SYMBOL(netif_schedule_queue); 3072 3073 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 3074 { 3075 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 3076 struct Qdisc *q; 3077 3078 rcu_read_lock(); 3079 q = rcu_dereference(dev_queue->qdisc); 3080 __netif_schedule(q); 3081 rcu_read_unlock(); 3082 } 3083 } 3084 EXPORT_SYMBOL(netif_tx_wake_queue); 3085 3086 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 3087 { 3088 unsigned long flags; 3089 3090 if (unlikely(!skb)) 3091 return; 3092 3093 if (likely(refcount_read(&skb->users) == 1)) { 3094 smp_rmb(); 3095 refcount_set(&skb->users, 0); 3096 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3097 return; 3098 } 3099 get_kfree_skb_cb(skb)->reason = reason; 3100 local_irq_save(flags); 3101 skb->next = __this_cpu_read(softnet_data.completion_queue); 3102 __this_cpu_write(softnet_data.completion_queue, skb); 3103 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3104 local_irq_restore(flags); 3105 } 3106 EXPORT_SYMBOL(__dev_kfree_skb_irq); 3107 3108 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 3109 { 3110 if (in_hardirq() || irqs_disabled()) 3111 __dev_kfree_skb_irq(skb, reason); 3112 else 3113 dev_kfree_skb(skb); 3114 } 3115 EXPORT_SYMBOL(__dev_kfree_skb_any); 3116 3117 3118 /** 3119 * netif_device_detach - mark device as removed 3120 * @dev: network device 3121 * 3122 * Mark device as removed from system and therefore no longer available. 3123 */ 3124 void netif_device_detach(struct net_device *dev) 3125 { 3126 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3127 netif_running(dev)) { 3128 netif_tx_stop_all_queues(dev); 3129 } 3130 } 3131 EXPORT_SYMBOL(netif_device_detach); 3132 3133 /** 3134 * netif_device_attach - mark device as attached 3135 * @dev: network device 3136 * 3137 * Mark device as attached from system and restart if needed. 3138 */ 3139 void netif_device_attach(struct net_device *dev) 3140 { 3141 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3142 netif_running(dev)) { 3143 netif_tx_wake_all_queues(dev); 3144 __netdev_watchdog_up(dev); 3145 } 3146 } 3147 EXPORT_SYMBOL(netif_device_attach); 3148 3149 /* 3150 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 3151 * to be used as a distribution range. 3152 */ 3153 static u16 skb_tx_hash(const struct net_device *dev, 3154 const struct net_device *sb_dev, 3155 struct sk_buff *skb) 3156 { 3157 u32 hash; 3158 u16 qoffset = 0; 3159 u16 qcount = dev->real_num_tx_queues; 3160 3161 if (dev->num_tc) { 3162 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3163 3164 qoffset = sb_dev->tc_to_txq[tc].offset; 3165 qcount = sb_dev->tc_to_txq[tc].count; 3166 } 3167 3168 if (skb_rx_queue_recorded(skb)) { 3169 hash = skb_get_rx_queue(skb); 3170 if (hash >= qoffset) 3171 hash -= qoffset; 3172 while (unlikely(hash >= qcount)) 3173 hash -= qcount; 3174 return hash + qoffset; 3175 } 3176 3177 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3178 } 3179 3180 static void skb_warn_bad_offload(const struct sk_buff *skb) 3181 { 3182 static const netdev_features_t null_features; 3183 struct net_device *dev = skb->dev; 3184 const char *name = ""; 3185 3186 if (!net_ratelimit()) 3187 return; 3188 3189 if (dev) { 3190 if (dev->dev.parent) 3191 name = dev_driver_string(dev->dev.parent); 3192 else 3193 name = netdev_name(dev); 3194 } 3195 skb_dump(KERN_WARNING, skb, false); 3196 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3197 name, dev ? &dev->features : &null_features, 3198 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3199 } 3200 3201 /* 3202 * Invalidate hardware checksum when packet is to be mangled, and 3203 * complete checksum manually on outgoing path. 3204 */ 3205 int skb_checksum_help(struct sk_buff *skb) 3206 { 3207 __wsum csum; 3208 int ret = 0, offset; 3209 3210 if (skb->ip_summed == CHECKSUM_COMPLETE) 3211 goto out_set_summed; 3212 3213 if (unlikely(skb_is_gso(skb))) { 3214 skb_warn_bad_offload(skb); 3215 return -EINVAL; 3216 } 3217 3218 /* Before computing a checksum, we should make sure no frag could 3219 * be modified by an external entity : checksum could be wrong. 3220 */ 3221 if (skb_has_shared_frag(skb)) { 3222 ret = __skb_linearize(skb); 3223 if (ret) 3224 goto out; 3225 } 3226 3227 offset = skb_checksum_start_offset(skb); 3228 BUG_ON(offset >= skb_headlen(skb)); 3229 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3230 3231 offset += skb->csum_offset; 3232 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 3233 3234 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3235 if (ret) 3236 goto out; 3237 3238 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3239 out_set_summed: 3240 skb->ip_summed = CHECKSUM_NONE; 3241 out: 3242 return ret; 3243 } 3244 EXPORT_SYMBOL(skb_checksum_help); 3245 3246 int skb_crc32c_csum_help(struct sk_buff *skb) 3247 { 3248 __le32 crc32c_csum; 3249 int ret = 0, offset, start; 3250 3251 if (skb->ip_summed != CHECKSUM_PARTIAL) 3252 goto out; 3253 3254 if (unlikely(skb_is_gso(skb))) 3255 goto out; 3256 3257 /* Before computing a checksum, we should make sure no frag could 3258 * be modified by an external entity : checksum could be wrong. 3259 */ 3260 if (unlikely(skb_has_shared_frag(skb))) { 3261 ret = __skb_linearize(skb); 3262 if (ret) 3263 goto out; 3264 } 3265 start = skb_checksum_start_offset(skb); 3266 offset = start + offsetof(struct sctphdr, checksum); 3267 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3268 ret = -EINVAL; 3269 goto out; 3270 } 3271 3272 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3273 if (ret) 3274 goto out; 3275 3276 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3277 skb->len - start, ~(__u32)0, 3278 crc32c_csum_stub)); 3279 *(__le32 *)(skb->data + offset) = crc32c_csum; 3280 skb->ip_summed = CHECKSUM_NONE; 3281 skb->csum_not_inet = 0; 3282 out: 3283 return ret; 3284 } 3285 3286 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3287 { 3288 __be16 type = skb->protocol; 3289 3290 /* Tunnel gso handlers can set protocol to ethernet. */ 3291 if (type == htons(ETH_P_TEB)) { 3292 struct ethhdr *eth; 3293 3294 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3295 return 0; 3296 3297 eth = (struct ethhdr *)skb->data; 3298 type = eth->h_proto; 3299 } 3300 3301 return __vlan_get_protocol(skb, type, depth); 3302 } 3303 3304 /** 3305 * skb_mac_gso_segment - mac layer segmentation handler. 3306 * @skb: buffer to segment 3307 * @features: features for the output path (see dev->features) 3308 */ 3309 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 3310 netdev_features_t features) 3311 { 3312 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 3313 struct packet_offload *ptype; 3314 int vlan_depth = skb->mac_len; 3315 __be16 type = skb_network_protocol(skb, &vlan_depth); 3316 3317 if (unlikely(!type)) 3318 return ERR_PTR(-EINVAL); 3319 3320 __skb_pull(skb, vlan_depth); 3321 3322 rcu_read_lock(); 3323 list_for_each_entry_rcu(ptype, &offload_base, list) { 3324 if (ptype->type == type && ptype->callbacks.gso_segment) { 3325 segs = ptype->callbacks.gso_segment(skb, features); 3326 break; 3327 } 3328 } 3329 rcu_read_unlock(); 3330 3331 __skb_push(skb, skb->data - skb_mac_header(skb)); 3332 3333 return segs; 3334 } 3335 EXPORT_SYMBOL(skb_mac_gso_segment); 3336 3337 3338 /* openvswitch calls this on rx path, so we need a different check. 3339 */ 3340 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3341 { 3342 if (tx_path) 3343 return skb->ip_summed != CHECKSUM_PARTIAL && 3344 skb->ip_summed != CHECKSUM_UNNECESSARY; 3345 3346 return skb->ip_summed == CHECKSUM_NONE; 3347 } 3348 3349 /** 3350 * __skb_gso_segment - Perform segmentation on skb. 3351 * @skb: buffer to segment 3352 * @features: features for the output path (see dev->features) 3353 * @tx_path: whether it is called in TX path 3354 * 3355 * This function segments the given skb and returns a list of segments. 3356 * 3357 * It may return NULL if the skb requires no segmentation. This is 3358 * only possible when GSO is used for verifying header integrity. 3359 * 3360 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. 3361 */ 3362 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3363 netdev_features_t features, bool tx_path) 3364 { 3365 struct sk_buff *segs; 3366 3367 if (unlikely(skb_needs_check(skb, tx_path))) { 3368 int err; 3369 3370 /* We're going to init ->check field in TCP or UDP header */ 3371 err = skb_cow_head(skb, 0); 3372 if (err < 0) 3373 return ERR_PTR(err); 3374 } 3375 3376 /* Only report GSO partial support if it will enable us to 3377 * support segmentation on this frame without needing additional 3378 * work. 3379 */ 3380 if (features & NETIF_F_GSO_PARTIAL) { 3381 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3382 struct net_device *dev = skb->dev; 3383 3384 partial_features |= dev->features & dev->gso_partial_features; 3385 if (!skb_gso_ok(skb, features | partial_features)) 3386 features &= ~NETIF_F_GSO_PARTIAL; 3387 } 3388 3389 BUILD_BUG_ON(SKB_GSO_CB_OFFSET + 3390 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3391 3392 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3393 SKB_GSO_CB(skb)->encap_level = 0; 3394 3395 skb_reset_mac_header(skb); 3396 skb_reset_mac_len(skb); 3397 3398 segs = skb_mac_gso_segment(skb, features); 3399 3400 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3401 skb_warn_bad_offload(skb); 3402 3403 return segs; 3404 } 3405 EXPORT_SYMBOL(__skb_gso_segment); 3406 3407 /* Take action when hardware reception checksum errors are detected. */ 3408 #ifdef CONFIG_BUG 3409 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3410 { 3411 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 3412 skb_dump(KERN_ERR, skb, true); 3413 dump_stack(); 3414 } 3415 3416 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3417 { 3418 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb); 3419 } 3420 EXPORT_SYMBOL(netdev_rx_csum_fault); 3421 #endif 3422 3423 /* XXX: check that highmem exists at all on the given machine. */ 3424 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3425 { 3426 #ifdef CONFIG_HIGHMEM 3427 int i; 3428 3429 if (!(dev->features & NETIF_F_HIGHDMA)) { 3430 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3431 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3432 3433 if (PageHighMem(skb_frag_page(frag))) 3434 return 1; 3435 } 3436 } 3437 #endif 3438 return 0; 3439 } 3440 3441 /* If MPLS offload request, verify we are testing hardware MPLS features 3442 * instead of standard features for the netdev. 3443 */ 3444 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3445 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3446 netdev_features_t features, 3447 __be16 type) 3448 { 3449 if (eth_p_mpls(type)) 3450 features &= skb->dev->mpls_features; 3451 3452 return features; 3453 } 3454 #else 3455 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3456 netdev_features_t features, 3457 __be16 type) 3458 { 3459 return features; 3460 } 3461 #endif 3462 3463 static netdev_features_t harmonize_features(struct sk_buff *skb, 3464 netdev_features_t features) 3465 { 3466 __be16 type; 3467 3468 type = skb_network_protocol(skb, NULL); 3469 features = net_mpls_features(skb, features, type); 3470 3471 if (skb->ip_summed != CHECKSUM_NONE && 3472 !can_checksum_protocol(features, type)) { 3473 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3474 } 3475 if (illegal_highdma(skb->dev, skb)) 3476 features &= ~NETIF_F_SG; 3477 3478 return features; 3479 } 3480 3481 netdev_features_t passthru_features_check(struct sk_buff *skb, 3482 struct net_device *dev, 3483 netdev_features_t features) 3484 { 3485 return features; 3486 } 3487 EXPORT_SYMBOL(passthru_features_check); 3488 3489 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3490 struct net_device *dev, 3491 netdev_features_t features) 3492 { 3493 return vlan_features_check(skb, features); 3494 } 3495 3496 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3497 struct net_device *dev, 3498 netdev_features_t features) 3499 { 3500 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3501 3502 if (gso_segs > dev->gso_max_segs) 3503 return features & ~NETIF_F_GSO_MASK; 3504 3505 if (!skb_shinfo(skb)->gso_type) { 3506 skb_warn_bad_offload(skb); 3507 return features & ~NETIF_F_GSO_MASK; 3508 } 3509 3510 /* Support for GSO partial features requires software 3511 * intervention before we can actually process the packets 3512 * so we need to strip support for any partial features now 3513 * and we can pull them back in after we have partially 3514 * segmented the frame. 3515 */ 3516 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3517 features &= ~dev->gso_partial_features; 3518 3519 /* Make sure to clear the IPv4 ID mangling feature if the 3520 * IPv4 header has the potential to be fragmented. 3521 */ 3522 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3523 struct iphdr *iph = skb->encapsulation ? 3524 inner_ip_hdr(skb) : ip_hdr(skb); 3525 3526 if (!(iph->frag_off & htons(IP_DF))) 3527 features &= ~NETIF_F_TSO_MANGLEID; 3528 } 3529 3530 return features; 3531 } 3532 3533 netdev_features_t netif_skb_features(struct sk_buff *skb) 3534 { 3535 struct net_device *dev = skb->dev; 3536 netdev_features_t features = dev->features; 3537 3538 if (skb_is_gso(skb)) 3539 features = gso_features_check(skb, dev, features); 3540 3541 /* If encapsulation offload request, verify we are testing 3542 * hardware encapsulation features instead of standard 3543 * features for the netdev 3544 */ 3545 if (skb->encapsulation) 3546 features &= dev->hw_enc_features; 3547 3548 if (skb_vlan_tagged(skb)) 3549 features = netdev_intersect_features(features, 3550 dev->vlan_features | 3551 NETIF_F_HW_VLAN_CTAG_TX | 3552 NETIF_F_HW_VLAN_STAG_TX); 3553 3554 if (dev->netdev_ops->ndo_features_check) 3555 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3556 features); 3557 else 3558 features &= dflt_features_check(skb, dev, features); 3559 3560 return harmonize_features(skb, features); 3561 } 3562 EXPORT_SYMBOL(netif_skb_features); 3563 3564 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3565 struct netdev_queue *txq, bool more) 3566 { 3567 unsigned int len; 3568 int rc; 3569 3570 if (dev_nit_active(dev)) 3571 dev_queue_xmit_nit(skb, dev); 3572 3573 len = skb->len; 3574 PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies); 3575 trace_net_dev_start_xmit(skb, dev); 3576 rc = netdev_start_xmit(skb, dev, txq, more); 3577 trace_net_dev_xmit(skb, rc, dev, len); 3578 3579 return rc; 3580 } 3581 3582 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3583 struct netdev_queue *txq, int *ret) 3584 { 3585 struct sk_buff *skb = first; 3586 int rc = NETDEV_TX_OK; 3587 3588 while (skb) { 3589 struct sk_buff *next = skb->next; 3590 3591 skb_mark_not_on_list(skb); 3592 rc = xmit_one(skb, dev, txq, next != NULL); 3593 if (unlikely(!dev_xmit_complete(rc))) { 3594 skb->next = next; 3595 goto out; 3596 } 3597 3598 skb = next; 3599 if (netif_tx_queue_stopped(txq) && skb) { 3600 rc = NETDEV_TX_BUSY; 3601 break; 3602 } 3603 } 3604 3605 out: 3606 *ret = rc; 3607 return skb; 3608 } 3609 3610 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3611 netdev_features_t features) 3612 { 3613 if (skb_vlan_tag_present(skb) && 3614 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3615 skb = __vlan_hwaccel_push_inside(skb); 3616 return skb; 3617 } 3618 3619 int skb_csum_hwoffload_help(struct sk_buff *skb, 3620 const netdev_features_t features) 3621 { 3622 if (unlikely(skb_csum_is_sctp(skb))) 3623 return !!(features & NETIF_F_SCTP_CRC) ? 0 : 3624 skb_crc32c_csum_help(skb); 3625 3626 if (features & NETIF_F_HW_CSUM) 3627 return 0; 3628 3629 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3630 switch (skb->csum_offset) { 3631 case offsetof(struct tcphdr, check): 3632 case offsetof(struct udphdr, check): 3633 return 0; 3634 } 3635 } 3636 3637 return skb_checksum_help(skb); 3638 } 3639 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3640 3641 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3642 { 3643 netdev_features_t features; 3644 3645 features = netif_skb_features(skb); 3646 skb = validate_xmit_vlan(skb, features); 3647 if (unlikely(!skb)) 3648 goto out_null; 3649 3650 skb = sk_validate_xmit_skb(skb, dev); 3651 if (unlikely(!skb)) 3652 goto out_null; 3653 3654 if (netif_needs_gso(skb, features)) { 3655 struct sk_buff *segs; 3656 3657 segs = skb_gso_segment(skb, features); 3658 if (IS_ERR(segs)) { 3659 goto out_kfree_skb; 3660 } else if (segs) { 3661 consume_skb(skb); 3662 skb = segs; 3663 } 3664 } else { 3665 if (skb_needs_linearize(skb, features) && 3666 __skb_linearize(skb)) 3667 goto out_kfree_skb; 3668 3669 /* If packet is not checksummed and device does not 3670 * support checksumming for this protocol, complete 3671 * checksumming here. 3672 */ 3673 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3674 if (skb->encapsulation) 3675 skb_set_inner_transport_header(skb, 3676 skb_checksum_start_offset(skb)); 3677 else 3678 skb_set_transport_header(skb, 3679 skb_checksum_start_offset(skb)); 3680 if (skb_csum_hwoffload_help(skb, features)) 3681 goto out_kfree_skb; 3682 } 3683 } 3684 3685 skb = validate_xmit_xfrm(skb, features, again); 3686 3687 return skb; 3688 3689 out_kfree_skb: 3690 kfree_skb(skb); 3691 out_null: 3692 atomic_long_inc(&dev->tx_dropped); 3693 return NULL; 3694 } 3695 3696 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3697 { 3698 struct sk_buff *next, *head = NULL, *tail; 3699 3700 for (; skb != NULL; skb = next) { 3701 next = skb->next; 3702 skb_mark_not_on_list(skb); 3703 3704 /* in case skb wont be segmented, point to itself */ 3705 skb->prev = skb; 3706 3707 skb = validate_xmit_skb(skb, dev, again); 3708 if (!skb) 3709 continue; 3710 3711 if (!head) 3712 head = skb; 3713 else 3714 tail->next = skb; 3715 /* If skb was segmented, skb->prev points to 3716 * the last segment. If not, it still contains skb. 3717 */ 3718 tail = skb->prev; 3719 } 3720 return head; 3721 } 3722 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3723 3724 static void qdisc_pkt_len_init(struct sk_buff *skb) 3725 { 3726 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3727 3728 qdisc_skb_cb(skb)->pkt_len = skb->len; 3729 3730 /* To get more precise estimation of bytes sent on wire, 3731 * we add to pkt_len the headers size of all segments 3732 */ 3733 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3734 unsigned int hdr_len; 3735 u16 gso_segs = shinfo->gso_segs; 3736 3737 /* mac layer + network layer */ 3738 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3739 3740 /* + transport layer */ 3741 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3742 const struct tcphdr *th; 3743 struct tcphdr _tcphdr; 3744 3745 th = skb_header_pointer(skb, skb_transport_offset(skb), 3746 sizeof(_tcphdr), &_tcphdr); 3747 if (likely(th)) 3748 hdr_len += __tcp_hdrlen(th); 3749 } else { 3750 struct udphdr _udphdr; 3751 3752 if (skb_header_pointer(skb, skb_transport_offset(skb), 3753 sizeof(_udphdr), &_udphdr)) 3754 hdr_len += sizeof(struct udphdr); 3755 } 3756 3757 if (shinfo->gso_type & SKB_GSO_DODGY) 3758 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3759 shinfo->gso_size); 3760 3761 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3762 } 3763 } 3764 3765 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q, 3766 struct sk_buff **to_free, 3767 struct netdev_queue *txq) 3768 { 3769 int rc; 3770 3771 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK; 3772 if (rc == NET_XMIT_SUCCESS) 3773 trace_qdisc_enqueue(q, txq, skb); 3774 return rc; 3775 } 3776 3777 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3778 struct net_device *dev, 3779 struct netdev_queue *txq) 3780 { 3781 spinlock_t *root_lock = qdisc_lock(q); 3782 struct sk_buff *to_free = NULL; 3783 bool contended; 3784 int rc; 3785 3786 qdisc_calculate_pkt_len(skb, q); 3787 3788 if (q->flags & TCQ_F_NOLOCK) { 3789 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) && 3790 qdisc_run_begin(q)) { 3791 /* Retest nolock_qdisc_is_empty() within the protection 3792 * of q->seqlock to protect from racing with requeuing. 3793 */ 3794 if (unlikely(!nolock_qdisc_is_empty(q))) { 3795 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3796 __qdisc_run(q); 3797 qdisc_run_end(q); 3798 3799 goto no_lock_out; 3800 } 3801 3802 qdisc_bstats_cpu_update(q, skb); 3803 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && 3804 !nolock_qdisc_is_empty(q)) 3805 __qdisc_run(q); 3806 3807 qdisc_run_end(q); 3808 return NET_XMIT_SUCCESS; 3809 } 3810 3811 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3812 qdisc_run(q); 3813 3814 no_lock_out: 3815 if (unlikely(to_free)) 3816 kfree_skb_list(to_free); 3817 return rc; 3818 } 3819 3820 /* 3821 * Heuristic to force contended enqueues to serialize on a 3822 * separate lock before trying to get qdisc main lock. 3823 * This permits qdisc->running owner to get the lock more 3824 * often and dequeue packets faster. 3825 */ 3826 contended = qdisc_is_running(q); 3827 if (unlikely(contended)) 3828 spin_lock(&q->busylock); 3829 3830 spin_lock(root_lock); 3831 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3832 __qdisc_drop(skb, &to_free); 3833 rc = NET_XMIT_DROP; 3834 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3835 qdisc_run_begin(q)) { 3836 /* 3837 * This is a work-conserving queue; there are no old skbs 3838 * waiting to be sent out; and the qdisc is not running - 3839 * xmit the skb directly. 3840 */ 3841 3842 qdisc_bstats_update(q, skb); 3843 3844 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3845 if (unlikely(contended)) { 3846 spin_unlock(&q->busylock); 3847 contended = false; 3848 } 3849 __qdisc_run(q); 3850 } 3851 3852 qdisc_run_end(q); 3853 rc = NET_XMIT_SUCCESS; 3854 } else { 3855 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); 3856 if (qdisc_run_begin(q)) { 3857 if (unlikely(contended)) { 3858 spin_unlock(&q->busylock); 3859 contended = false; 3860 } 3861 __qdisc_run(q); 3862 qdisc_run_end(q); 3863 } 3864 } 3865 spin_unlock(root_lock); 3866 if (unlikely(to_free)) 3867 kfree_skb_list(to_free); 3868 if (unlikely(contended)) 3869 spin_unlock(&q->busylock); 3870 return rc; 3871 } 3872 3873 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3874 static void skb_update_prio(struct sk_buff *skb) 3875 { 3876 const struct netprio_map *map; 3877 const struct sock *sk; 3878 unsigned int prioidx; 3879 3880 if (skb->priority) 3881 return; 3882 map = rcu_dereference_bh(skb->dev->priomap); 3883 if (!map) 3884 return; 3885 sk = skb_to_full_sk(skb); 3886 if (!sk) 3887 return; 3888 3889 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3890 3891 if (prioidx < map->priomap_len) 3892 skb->priority = map->priomap[prioidx]; 3893 } 3894 #else 3895 #define skb_update_prio(skb) 3896 #endif 3897 3898 /** 3899 * dev_loopback_xmit - loop back @skb 3900 * @net: network namespace this loopback is happening in 3901 * @sk: sk needed to be a netfilter okfn 3902 * @skb: buffer to transmit 3903 */ 3904 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3905 { 3906 skb_reset_mac_header(skb); 3907 __skb_pull(skb, skb_network_offset(skb)); 3908 skb->pkt_type = PACKET_LOOPBACK; 3909 skb->ip_summed = CHECKSUM_UNNECESSARY; 3910 WARN_ON(!skb_dst(skb)); 3911 skb_dst_force(skb); 3912 netif_rx_ni(skb); 3913 return 0; 3914 } 3915 EXPORT_SYMBOL(dev_loopback_xmit); 3916 3917 #ifdef CONFIG_NET_EGRESS 3918 static struct sk_buff * 3919 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3920 { 3921 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3922 struct tcf_result cl_res; 3923 3924 if (!miniq) 3925 return skb; 3926 3927 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ 3928 qdisc_skb_cb(skb)->mru = 0; 3929 qdisc_skb_cb(skb)->post_ct = false; 3930 mini_qdisc_bstats_cpu_update(miniq, skb); 3931 3932 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 3933 case TC_ACT_OK: 3934 case TC_ACT_RECLASSIFY: 3935 skb->tc_index = TC_H_MIN(cl_res.classid); 3936 break; 3937 case TC_ACT_SHOT: 3938 mini_qdisc_qstats_cpu_drop(miniq); 3939 *ret = NET_XMIT_DROP; 3940 kfree_skb(skb); 3941 return NULL; 3942 case TC_ACT_STOLEN: 3943 case TC_ACT_QUEUED: 3944 case TC_ACT_TRAP: 3945 *ret = NET_XMIT_SUCCESS; 3946 consume_skb(skb); 3947 return NULL; 3948 case TC_ACT_REDIRECT: 3949 /* No need to push/pop skb's mac_header here on egress! */ 3950 skb_do_redirect(skb); 3951 *ret = NET_XMIT_SUCCESS; 3952 return NULL; 3953 default: 3954 break; 3955 } 3956 3957 return skb; 3958 } 3959 #endif /* CONFIG_NET_EGRESS */ 3960 3961 #ifdef CONFIG_XPS 3962 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3963 struct xps_dev_maps *dev_maps, unsigned int tci) 3964 { 3965 int tc = netdev_get_prio_tc_map(dev, skb->priority); 3966 struct xps_map *map; 3967 int queue_index = -1; 3968 3969 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids) 3970 return queue_index; 3971 3972 tci *= dev_maps->num_tc; 3973 tci += tc; 3974 3975 map = rcu_dereference(dev_maps->attr_map[tci]); 3976 if (map) { 3977 if (map->len == 1) 3978 queue_index = map->queues[0]; 3979 else 3980 queue_index = map->queues[reciprocal_scale( 3981 skb_get_hash(skb), map->len)]; 3982 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3983 queue_index = -1; 3984 } 3985 return queue_index; 3986 } 3987 #endif 3988 3989 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3990 struct sk_buff *skb) 3991 { 3992 #ifdef CONFIG_XPS 3993 struct xps_dev_maps *dev_maps; 3994 struct sock *sk = skb->sk; 3995 int queue_index = -1; 3996 3997 if (!static_key_false(&xps_needed)) 3998 return -1; 3999 4000 rcu_read_lock(); 4001 if (!static_key_false(&xps_rxqs_needed)) 4002 goto get_cpus_map; 4003 4004 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]); 4005 if (dev_maps) { 4006 int tci = sk_rx_queue_get(sk); 4007 4008 if (tci >= 0) 4009 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4010 tci); 4011 } 4012 4013 get_cpus_map: 4014 if (queue_index < 0) { 4015 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]); 4016 if (dev_maps) { 4017 unsigned int tci = skb->sender_cpu - 1; 4018 4019 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 4020 tci); 4021 } 4022 } 4023 rcu_read_unlock(); 4024 4025 return queue_index; 4026 #else 4027 return -1; 4028 #endif 4029 } 4030 4031 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 4032 struct net_device *sb_dev) 4033 { 4034 return 0; 4035 } 4036 EXPORT_SYMBOL(dev_pick_tx_zero); 4037 4038 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 4039 struct net_device *sb_dev) 4040 { 4041 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 4042 } 4043 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 4044 4045 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 4046 struct net_device *sb_dev) 4047 { 4048 struct sock *sk = skb->sk; 4049 int queue_index = sk_tx_queue_get(sk); 4050 4051 sb_dev = sb_dev ? : dev; 4052 4053 if (queue_index < 0 || skb->ooo_okay || 4054 queue_index >= dev->real_num_tx_queues) { 4055 int new_index = get_xps_queue(dev, sb_dev, skb); 4056 4057 if (new_index < 0) 4058 new_index = skb_tx_hash(dev, sb_dev, skb); 4059 4060 if (queue_index != new_index && sk && 4061 sk_fullsock(sk) && 4062 rcu_access_pointer(sk->sk_dst_cache)) 4063 sk_tx_queue_set(sk, new_index); 4064 4065 queue_index = new_index; 4066 } 4067 4068 return queue_index; 4069 } 4070 EXPORT_SYMBOL(netdev_pick_tx); 4071 4072 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 4073 struct sk_buff *skb, 4074 struct net_device *sb_dev) 4075 { 4076 int queue_index = 0; 4077 4078 #ifdef CONFIG_XPS 4079 u32 sender_cpu = skb->sender_cpu - 1; 4080 4081 if (sender_cpu >= (u32)NR_CPUS) 4082 skb->sender_cpu = raw_smp_processor_id() + 1; 4083 #endif 4084 4085 if (dev->real_num_tx_queues != 1) { 4086 const struct net_device_ops *ops = dev->netdev_ops; 4087 4088 if (ops->ndo_select_queue) 4089 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 4090 else 4091 queue_index = netdev_pick_tx(dev, skb, sb_dev); 4092 4093 queue_index = netdev_cap_txqueue(dev, queue_index); 4094 } 4095 4096 skb_set_queue_mapping(skb, queue_index); 4097 return netdev_get_tx_queue(dev, queue_index); 4098 } 4099 4100 /** 4101 * __dev_queue_xmit - transmit a buffer 4102 * @skb: buffer to transmit 4103 * @sb_dev: suboordinate device used for L2 forwarding offload 4104 * 4105 * Queue a buffer for transmission to a network device. The caller must 4106 * have set the device and priority and built the buffer before calling 4107 * this function. The function can be called from an interrupt. 4108 * 4109 * A negative errno code is returned on a failure. A success does not 4110 * guarantee the frame will be transmitted as it may be dropped due 4111 * to congestion or traffic shaping. 4112 * 4113 * ----------------------------------------------------------------------------------- 4114 * I notice this method can also return errors from the queue disciplines, 4115 * including NET_XMIT_DROP, which is a positive value. So, errors can also 4116 * be positive. 4117 * 4118 * Regardless of the return value, the skb is consumed, so it is currently 4119 * difficult to retry a send to this method. (You can bump the ref count 4120 * before sending to hold a reference for retry if you are careful.) 4121 * 4122 * When calling this method, interrupts MUST be enabled. This is because 4123 * the BH enable code must have IRQs enabled so that it will not deadlock. 4124 * --BLG 4125 */ 4126 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 4127 { 4128 struct net_device *dev = skb->dev; 4129 struct netdev_queue *txq; 4130 struct Qdisc *q; 4131 int rc = -ENOMEM; 4132 bool again = false; 4133 4134 skb_reset_mac_header(skb); 4135 4136 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 4137 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); 4138 4139 /* Disable soft irqs for various locks below. Also 4140 * stops preemption for RCU. 4141 */ 4142 rcu_read_lock_bh(); 4143 4144 skb_update_prio(skb); 4145 4146 qdisc_pkt_len_init(skb); 4147 #ifdef CONFIG_NET_CLS_ACT 4148 skb->tc_at_ingress = 0; 4149 # ifdef CONFIG_NET_EGRESS 4150 if (static_branch_unlikely(&egress_needed_key)) { 4151 skb = sch_handle_egress(skb, &rc, dev); 4152 if (!skb) 4153 goto out; 4154 } 4155 # endif 4156 #endif 4157 /* If device/qdisc don't need skb->dst, release it right now while 4158 * its hot in this cpu cache. 4159 */ 4160 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 4161 skb_dst_drop(skb); 4162 else 4163 skb_dst_force(skb); 4164 4165 txq = netdev_core_pick_tx(dev, skb, sb_dev); 4166 q = rcu_dereference_bh(txq->qdisc); 4167 4168 trace_net_dev_queue(skb); 4169 if (q->enqueue) { 4170 rc = __dev_xmit_skb(skb, q, dev, txq); 4171 goto out; 4172 } 4173 4174 /* The device has no queue. Common case for software devices: 4175 * loopback, all the sorts of tunnels... 4176 4177 * Really, it is unlikely that netif_tx_lock protection is necessary 4178 * here. (f.e. loopback and IP tunnels are clean ignoring statistics 4179 * counters.) 4180 * However, it is possible, that they rely on protection 4181 * made by us here. 4182 4183 * Check this and shot the lock. It is not prone from deadlocks. 4184 *Either shot noqueue qdisc, it is even simpler 8) 4185 */ 4186 if (dev->flags & IFF_UP) { 4187 int cpu = smp_processor_id(); /* ok because BHs are off */ 4188 4189 if (txq->xmit_lock_owner != cpu) { 4190 if (dev_xmit_recursion()) 4191 goto recursion_alert; 4192 4193 skb = validate_xmit_skb(skb, dev, &again); 4194 if (!skb) 4195 goto out; 4196 4197 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); 4198 HARD_TX_LOCK(dev, txq, cpu); 4199 4200 if (!netif_xmit_stopped(txq)) { 4201 dev_xmit_recursion_inc(); 4202 skb = dev_hard_start_xmit(skb, dev, txq, &rc); 4203 dev_xmit_recursion_dec(); 4204 if (dev_xmit_complete(rc)) { 4205 HARD_TX_UNLOCK(dev, txq); 4206 goto out; 4207 } 4208 } 4209 HARD_TX_UNLOCK(dev, txq); 4210 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 4211 dev->name); 4212 } else { 4213 /* Recursion is detected! It is possible, 4214 * unfortunately 4215 */ 4216 recursion_alert: 4217 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 4218 dev->name); 4219 } 4220 } 4221 4222 rc = -ENETDOWN; 4223 rcu_read_unlock_bh(); 4224 4225 atomic_long_inc(&dev->tx_dropped); 4226 kfree_skb_list(skb); 4227 return rc; 4228 out: 4229 rcu_read_unlock_bh(); 4230 return rc; 4231 } 4232 4233 int dev_queue_xmit(struct sk_buff *skb) 4234 { 4235 return __dev_queue_xmit(skb, NULL); 4236 } 4237 EXPORT_SYMBOL(dev_queue_xmit); 4238 4239 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev) 4240 { 4241 return __dev_queue_xmit(skb, sb_dev); 4242 } 4243 EXPORT_SYMBOL(dev_queue_xmit_accel); 4244 4245 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) 4246 { 4247 struct net_device *dev = skb->dev; 4248 struct sk_buff *orig_skb = skb; 4249 struct netdev_queue *txq; 4250 int ret = NETDEV_TX_BUSY; 4251 bool again = false; 4252 4253 if (unlikely(!netif_running(dev) || 4254 !netif_carrier_ok(dev))) 4255 goto drop; 4256 4257 skb = validate_xmit_skb_list(skb, dev, &again); 4258 if (skb != orig_skb) 4259 goto drop; 4260 4261 skb_set_queue_mapping(skb, queue_id); 4262 txq = skb_get_tx_queue(dev, skb); 4263 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies); 4264 4265 local_bh_disable(); 4266 4267 dev_xmit_recursion_inc(); 4268 HARD_TX_LOCK(dev, txq, smp_processor_id()); 4269 if (!netif_xmit_frozen_or_drv_stopped(txq)) 4270 ret = netdev_start_xmit(skb, dev, txq, false); 4271 HARD_TX_UNLOCK(dev, txq); 4272 dev_xmit_recursion_dec(); 4273 4274 local_bh_enable(); 4275 return ret; 4276 drop: 4277 atomic_long_inc(&dev->tx_dropped); 4278 kfree_skb_list(skb); 4279 return NET_XMIT_DROP; 4280 } 4281 EXPORT_SYMBOL(__dev_direct_xmit); 4282 4283 /************************************************************************* 4284 * Receiver routines 4285 *************************************************************************/ 4286 4287 int netdev_max_backlog __read_mostly = 1000; 4288 EXPORT_SYMBOL(netdev_max_backlog); 4289 4290 int netdev_tstamp_prequeue __read_mostly = 1; 4291 int netdev_budget __read_mostly = 300; 4292 /* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ 4293 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; 4294 int weight_p __read_mostly = 64; /* old backlog weight */ 4295 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4296 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4297 int dev_rx_weight __read_mostly = 64; 4298 int dev_tx_weight __read_mostly = 64; 4299 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ 4300 int gro_normal_batch __read_mostly = 8; 4301 4302 /* Called with irq disabled */ 4303 static inline void ____napi_schedule(struct softnet_data *sd, 4304 struct napi_struct *napi) 4305 { 4306 struct task_struct *thread; 4307 4308 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { 4309 /* Paired with smp_mb__before_atomic() in 4310 * napi_enable()/dev_set_threaded(). 4311 * Use READ_ONCE() to guarantee a complete 4312 * read on napi->thread. Only call 4313 * wake_up_process() when it's not NULL. 4314 */ 4315 thread = READ_ONCE(napi->thread); 4316 if (thread) { 4317 /* Avoid doing set_bit() if the thread is in 4318 * INTERRUPTIBLE state, cause napi_thread_wait() 4319 * makes sure to proceed with napi polling 4320 * if the thread is explicitly woken from here. 4321 */ 4322 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE) 4323 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4324 wake_up_process(thread); 4325 return; 4326 } 4327 } 4328 4329 list_add_tail(&napi->poll_list, &sd->poll_list); 4330 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4331 } 4332 4333 #ifdef CONFIG_RPS 4334 4335 /* One global table that all flow-based protocols share. */ 4336 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4337 EXPORT_SYMBOL(rps_sock_flow_table); 4338 u32 rps_cpu_mask __read_mostly; 4339 EXPORT_SYMBOL(rps_cpu_mask); 4340 4341 struct static_key_false rps_needed __read_mostly; 4342 EXPORT_SYMBOL(rps_needed); 4343 struct static_key_false rfs_needed __read_mostly; 4344 EXPORT_SYMBOL(rfs_needed); 4345 4346 static struct rps_dev_flow * 4347 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4348 struct rps_dev_flow *rflow, u16 next_cpu) 4349 { 4350 if (next_cpu < nr_cpu_ids) { 4351 #ifdef CONFIG_RFS_ACCEL 4352 struct netdev_rx_queue *rxqueue; 4353 struct rps_dev_flow_table *flow_table; 4354 struct rps_dev_flow *old_rflow; 4355 u32 flow_id; 4356 u16 rxq_index; 4357 int rc; 4358 4359 /* Should we steer this flow to a different hardware queue? */ 4360 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4361 !(dev->features & NETIF_F_NTUPLE)) 4362 goto out; 4363 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4364 if (rxq_index == skb_get_rx_queue(skb)) 4365 goto out; 4366 4367 rxqueue = dev->_rx + rxq_index; 4368 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4369 if (!flow_table) 4370 goto out; 4371 flow_id = skb_get_hash(skb) & flow_table->mask; 4372 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4373 rxq_index, flow_id); 4374 if (rc < 0) 4375 goto out; 4376 old_rflow = rflow; 4377 rflow = &flow_table->flows[flow_id]; 4378 rflow->filter = rc; 4379 if (old_rflow->filter == rflow->filter) 4380 old_rflow->filter = RPS_NO_FILTER; 4381 out: 4382 #endif 4383 rflow->last_qtail = 4384 per_cpu(softnet_data, next_cpu).input_queue_head; 4385 } 4386 4387 rflow->cpu = next_cpu; 4388 return rflow; 4389 } 4390 4391 /* 4392 * get_rps_cpu is called from netif_receive_skb and returns the target 4393 * CPU from the RPS map of the receiving queue for a given skb. 4394 * rcu_read_lock must be held on entry. 4395 */ 4396 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4397 struct rps_dev_flow **rflowp) 4398 { 4399 const struct rps_sock_flow_table *sock_flow_table; 4400 struct netdev_rx_queue *rxqueue = dev->_rx; 4401 struct rps_dev_flow_table *flow_table; 4402 struct rps_map *map; 4403 int cpu = -1; 4404 u32 tcpu; 4405 u32 hash; 4406 4407 if (skb_rx_queue_recorded(skb)) { 4408 u16 index = skb_get_rx_queue(skb); 4409 4410 if (unlikely(index >= dev->real_num_rx_queues)) { 4411 WARN_ONCE(dev->real_num_rx_queues > 1, 4412 "%s received packet on queue %u, but number " 4413 "of RX queues is %u\n", 4414 dev->name, index, dev->real_num_rx_queues); 4415 goto done; 4416 } 4417 rxqueue += index; 4418 } 4419 4420 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4421 4422 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4423 map = rcu_dereference(rxqueue->rps_map); 4424 if (!flow_table && !map) 4425 goto done; 4426 4427 skb_reset_network_header(skb); 4428 hash = skb_get_hash(skb); 4429 if (!hash) 4430 goto done; 4431 4432 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4433 if (flow_table && sock_flow_table) { 4434 struct rps_dev_flow *rflow; 4435 u32 next_cpu; 4436 u32 ident; 4437 4438 /* First check into global flow table if there is a match */ 4439 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4440 if ((ident ^ hash) & ~rps_cpu_mask) 4441 goto try_rps; 4442 4443 next_cpu = ident & rps_cpu_mask; 4444 4445 /* OK, now we know there is a match, 4446 * we can look at the local (per receive queue) flow table 4447 */ 4448 rflow = &flow_table->flows[hash & flow_table->mask]; 4449 tcpu = rflow->cpu; 4450 4451 /* 4452 * If the desired CPU (where last recvmsg was done) is 4453 * different from current CPU (one in the rx-queue flow 4454 * table entry), switch if one of the following holds: 4455 * - Current CPU is unset (>= nr_cpu_ids). 4456 * - Current CPU is offline. 4457 * - The current CPU's queue tail has advanced beyond the 4458 * last packet that was enqueued using this table entry. 4459 * This guarantees that all previous packets for the flow 4460 * have been dequeued, thus preserving in order delivery. 4461 */ 4462 if (unlikely(tcpu != next_cpu) && 4463 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4464 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4465 rflow->last_qtail)) >= 0)) { 4466 tcpu = next_cpu; 4467 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4468 } 4469 4470 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4471 *rflowp = rflow; 4472 cpu = tcpu; 4473 goto done; 4474 } 4475 } 4476 4477 try_rps: 4478 4479 if (map) { 4480 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4481 if (cpu_online(tcpu)) { 4482 cpu = tcpu; 4483 goto done; 4484 } 4485 } 4486 4487 done: 4488 return cpu; 4489 } 4490 4491 #ifdef CONFIG_RFS_ACCEL 4492 4493 /** 4494 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4495 * @dev: Device on which the filter was set 4496 * @rxq_index: RX queue index 4497 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4498 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4499 * 4500 * Drivers that implement ndo_rx_flow_steer() should periodically call 4501 * this function for each installed filter and remove the filters for 4502 * which it returns %true. 4503 */ 4504 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4505 u32 flow_id, u16 filter_id) 4506 { 4507 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4508 struct rps_dev_flow_table *flow_table; 4509 struct rps_dev_flow *rflow; 4510 bool expire = true; 4511 unsigned int cpu; 4512 4513 rcu_read_lock(); 4514 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4515 if (flow_table && flow_id <= flow_table->mask) { 4516 rflow = &flow_table->flows[flow_id]; 4517 cpu = READ_ONCE(rflow->cpu); 4518 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4519 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4520 rflow->last_qtail) < 4521 (int)(10 * flow_table->mask))) 4522 expire = false; 4523 } 4524 rcu_read_unlock(); 4525 return expire; 4526 } 4527 EXPORT_SYMBOL(rps_may_expire_flow); 4528 4529 #endif /* CONFIG_RFS_ACCEL */ 4530 4531 /* Called from hardirq (IPI) context */ 4532 static void rps_trigger_softirq(void *data) 4533 { 4534 struct softnet_data *sd = data; 4535 4536 ____napi_schedule(sd, &sd->backlog); 4537 sd->received_rps++; 4538 } 4539 4540 #endif /* CONFIG_RPS */ 4541 4542 /* 4543 * Check if this softnet_data structure is another cpu one 4544 * If yes, queue it to our IPI list and return 1 4545 * If no, return 0 4546 */ 4547 static int rps_ipi_queued(struct softnet_data *sd) 4548 { 4549 #ifdef CONFIG_RPS 4550 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4551 4552 if (sd != mysd) { 4553 sd->rps_ipi_next = mysd->rps_ipi_list; 4554 mysd->rps_ipi_list = sd; 4555 4556 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4557 return 1; 4558 } 4559 #endif /* CONFIG_RPS */ 4560 return 0; 4561 } 4562 4563 #ifdef CONFIG_NET_FLOW_LIMIT 4564 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4565 #endif 4566 4567 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4568 { 4569 #ifdef CONFIG_NET_FLOW_LIMIT 4570 struct sd_flow_limit *fl; 4571 struct softnet_data *sd; 4572 unsigned int old_flow, new_flow; 4573 4574 if (qlen < (netdev_max_backlog >> 1)) 4575 return false; 4576 4577 sd = this_cpu_ptr(&softnet_data); 4578 4579 rcu_read_lock(); 4580 fl = rcu_dereference(sd->flow_limit); 4581 if (fl) { 4582 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4583 old_flow = fl->history[fl->history_head]; 4584 fl->history[fl->history_head] = new_flow; 4585 4586 fl->history_head++; 4587 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4588 4589 if (likely(fl->buckets[old_flow])) 4590 fl->buckets[old_flow]--; 4591 4592 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4593 fl->count++; 4594 rcu_read_unlock(); 4595 return true; 4596 } 4597 } 4598 rcu_read_unlock(); 4599 #endif 4600 return false; 4601 } 4602 4603 /* 4604 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4605 * queue (may be a remote CPU queue). 4606 */ 4607 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4608 unsigned int *qtail) 4609 { 4610 struct softnet_data *sd; 4611 unsigned long flags; 4612 unsigned int qlen; 4613 4614 sd = &per_cpu(softnet_data, cpu); 4615 4616 local_irq_save(flags); 4617 4618 rps_lock(sd); 4619 if (!netif_running(skb->dev)) 4620 goto drop; 4621 qlen = skb_queue_len(&sd->input_pkt_queue); 4622 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4623 if (qlen) { 4624 enqueue: 4625 __skb_queue_tail(&sd->input_pkt_queue, skb); 4626 input_queue_tail_incr_save(sd, qtail); 4627 rps_unlock(sd); 4628 local_irq_restore(flags); 4629 return NET_RX_SUCCESS; 4630 } 4631 4632 /* Schedule NAPI for backlog device 4633 * We can use non atomic operation since we own the queue lock 4634 */ 4635 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 4636 if (!rps_ipi_queued(sd)) 4637 ____napi_schedule(sd, &sd->backlog); 4638 } 4639 goto enqueue; 4640 } 4641 4642 drop: 4643 sd->dropped++; 4644 rps_unlock(sd); 4645 4646 local_irq_restore(flags); 4647 4648 atomic_long_inc(&skb->dev->rx_dropped); 4649 kfree_skb(skb); 4650 return NET_RX_DROP; 4651 } 4652 4653 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4654 { 4655 struct net_device *dev = skb->dev; 4656 struct netdev_rx_queue *rxqueue; 4657 4658 rxqueue = dev->_rx; 4659 4660 if (skb_rx_queue_recorded(skb)) { 4661 u16 index = skb_get_rx_queue(skb); 4662 4663 if (unlikely(index >= dev->real_num_rx_queues)) { 4664 WARN_ONCE(dev->real_num_rx_queues > 1, 4665 "%s received packet on queue %u, but number " 4666 "of RX queues is %u\n", 4667 dev->name, index, dev->real_num_rx_queues); 4668 4669 return rxqueue; /* Return first rxqueue */ 4670 } 4671 rxqueue += index; 4672 } 4673 return rxqueue; 4674 } 4675 4676 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, 4677 struct bpf_prog *xdp_prog) 4678 { 4679 void *orig_data, *orig_data_end, *hard_start; 4680 struct netdev_rx_queue *rxqueue; 4681 bool orig_bcast, orig_host; 4682 u32 mac_len, frame_sz; 4683 __be16 orig_eth_type; 4684 struct ethhdr *eth; 4685 u32 metalen, act; 4686 int off; 4687 4688 /* The XDP program wants to see the packet starting at the MAC 4689 * header. 4690 */ 4691 mac_len = skb->data - skb_mac_header(skb); 4692 hard_start = skb->data - skb_headroom(skb); 4693 4694 /* SKB "head" area always have tailroom for skb_shared_info */ 4695 frame_sz = (void *)skb_end_pointer(skb) - hard_start; 4696 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4697 4698 rxqueue = netif_get_rxqueue(skb); 4699 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq); 4700 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len, 4701 skb_headlen(skb) + mac_len, true); 4702 4703 orig_data_end = xdp->data_end; 4704 orig_data = xdp->data; 4705 eth = (struct ethhdr *)xdp->data; 4706 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); 4707 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); 4708 orig_eth_type = eth->h_proto; 4709 4710 act = bpf_prog_run_xdp(xdp_prog, xdp); 4711 4712 /* check if bpf_xdp_adjust_head was used */ 4713 off = xdp->data - orig_data; 4714 if (off) { 4715 if (off > 0) 4716 __skb_pull(skb, off); 4717 else if (off < 0) 4718 __skb_push(skb, -off); 4719 4720 skb->mac_header += off; 4721 skb_reset_network_header(skb); 4722 } 4723 4724 /* check if bpf_xdp_adjust_tail was used */ 4725 off = xdp->data_end - orig_data_end; 4726 if (off != 0) { 4727 skb_set_tail_pointer(skb, xdp->data_end - xdp->data); 4728 skb->len += off; /* positive on grow, negative on shrink */ 4729 } 4730 4731 /* check if XDP changed eth hdr such SKB needs update */ 4732 eth = (struct ethhdr *)xdp->data; 4733 if ((orig_eth_type != eth->h_proto) || 4734 (orig_host != ether_addr_equal_64bits(eth->h_dest, 4735 skb->dev->dev_addr)) || 4736 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { 4737 __skb_push(skb, ETH_HLEN); 4738 skb->pkt_type = PACKET_HOST; 4739 skb->protocol = eth_type_trans(skb, skb->dev); 4740 } 4741 4742 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull 4743 * before calling us again on redirect path. We do not call do_redirect 4744 * as we leave that up to the caller. 4745 * 4746 * Caller is responsible for managing lifetime of skb (i.e. calling 4747 * kfree_skb in response to actions it cannot handle/XDP_DROP). 4748 */ 4749 switch (act) { 4750 case XDP_REDIRECT: 4751 case XDP_TX: 4752 __skb_push(skb, mac_len); 4753 break; 4754 case XDP_PASS: 4755 metalen = xdp->data - xdp->data_meta; 4756 if (metalen) 4757 skb_metadata_set(skb, metalen); 4758 break; 4759 } 4760 4761 return act; 4762 } 4763 4764 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4765 struct xdp_buff *xdp, 4766 struct bpf_prog *xdp_prog) 4767 { 4768 u32 act = XDP_DROP; 4769 4770 /* Reinjected packets coming from act_mirred or similar should 4771 * not get XDP generic processing. 4772 */ 4773 if (skb_is_redirected(skb)) 4774 return XDP_PASS; 4775 4776 /* XDP packets must be linear and must have sufficient headroom 4777 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4778 * native XDP provides, thus we need to do it here as well. 4779 */ 4780 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4781 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4782 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4783 int troom = skb->tail + skb->data_len - skb->end; 4784 4785 /* In case we have to go down the path and also linearize, 4786 * then lets do the pskb_expand_head() work just once here. 4787 */ 4788 if (pskb_expand_head(skb, 4789 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4790 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4791 goto do_drop; 4792 if (skb_linearize(skb)) 4793 goto do_drop; 4794 } 4795 4796 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog); 4797 switch (act) { 4798 case XDP_REDIRECT: 4799 case XDP_TX: 4800 case XDP_PASS: 4801 break; 4802 default: 4803 bpf_warn_invalid_xdp_action(act); 4804 fallthrough; 4805 case XDP_ABORTED: 4806 trace_xdp_exception(skb->dev, xdp_prog, act); 4807 fallthrough; 4808 case XDP_DROP: 4809 do_drop: 4810 kfree_skb(skb); 4811 break; 4812 } 4813 4814 return act; 4815 } 4816 4817 /* When doing generic XDP we have to bypass the qdisc layer and the 4818 * network taps in order to match in-driver-XDP behavior. 4819 */ 4820 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4821 { 4822 struct net_device *dev = skb->dev; 4823 struct netdev_queue *txq; 4824 bool free_skb = true; 4825 int cpu, rc; 4826 4827 txq = netdev_core_pick_tx(dev, skb, NULL); 4828 cpu = smp_processor_id(); 4829 HARD_TX_LOCK(dev, txq, cpu); 4830 if (!netif_xmit_stopped(txq)) { 4831 rc = netdev_start_xmit(skb, dev, txq, 0); 4832 if (dev_xmit_complete(rc)) 4833 free_skb = false; 4834 } 4835 HARD_TX_UNLOCK(dev, txq); 4836 if (free_skb) { 4837 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4838 kfree_skb(skb); 4839 } 4840 } 4841 4842 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4843 4844 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4845 { 4846 if (xdp_prog) { 4847 struct xdp_buff xdp; 4848 u32 act; 4849 int err; 4850 4851 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4852 if (act != XDP_PASS) { 4853 switch (act) { 4854 case XDP_REDIRECT: 4855 err = xdp_do_generic_redirect(skb->dev, skb, 4856 &xdp, xdp_prog); 4857 if (err) 4858 goto out_redir; 4859 break; 4860 case XDP_TX: 4861 generic_xdp_tx(skb, xdp_prog); 4862 break; 4863 } 4864 return XDP_DROP; 4865 } 4866 } 4867 return XDP_PASS; 4868 out_redir: 4869 kfree_skb(skb); 4870 return XDP_DROP; 4871 } 4872 EXPORT_SYMBOL_GPL(do_xdp_generic); 4873 4874 static int netif_rx_internal(struct sk_buff *skb) 4875 { 4876 int ret; 4877 4878 net_timestamp_check(netdev_tstamp_prequeue, skb); 4879 4880 trace_netif_rx(skb); 4881 4882 #ifdef CONFIG_RPS 4883 if (static_branch_unlikely(&rps_needed)) { 4884 struct rps_dev_flow voidflow, *rflow = &voidflow; 4885 int cpu; 4886 4887 preempt_disable(); 4888 rcu_read_lock(); 4889 4890 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4891 if (cpu < 0) 4892 cpu = smp_processor_id(); 4893 4894 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4895 4896 rcu_read_unlock(); 4897 preempt_enable(); 4898 } else 4899 #endif 4900 { 4901 unsigned int qtail; 4902 4903 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 4904 put_cpu(); 4905 } 4906 return ret; 4907 } 4908 4909 /** 4910 * netif_rx - post buffer to the network code 4911 * @skb: buffer to post 4912 * 4913 * This function receives a packet from a device driver and queues it for 4914 * the upper (protocol) levels to process. It always succeeds. The buffer 4915 * may be dropped during processing for congestion control or by the 4916 * protocol layers. 4917 * 4918 * return values: 4919 * NET_RX_SUCCESS (no congestion) 4920 * NET_RX_DROP (packet was dropped) 4921 * 4922 */ 4923 4924 int netif_rx(struct sk_buff *skb) 4925 { 4926 int ret; 4927 4928 trace_netif_rx_entry(skb); 4929 4930 ret = netif_rx_internal(skb); 4931 trace_netif_rx_exit(ret); 4932 4933 return ret; 4934 } 4935 EXPORT_SYMBOL(netif_rx); 4936 4937 int netif_rx_ni(struct sk_buff *skb) 4938 { 4939 int err; 4940 4941 trace_netif_rx_ni_entry(skb); 4942 4943 preempt_disable(); 4944 err = netif_rx_internal(skb); 4945 if (local_softirq_pending()) 4946 do_softirq(); 4947 preempt_enable(); 4948 trace_netif_rx_ni_exit(err); 4949 4950 return err; 4951 } 4952 EXPORT_SYMBOL(netif_rx_ni); 4953 4954 int netif_rx_any_context(struct sk_buff *skb) 4955 { 4956 /* 4957 * If invoked from contexts which do not invoke bottom half 4958 * processing either at return from interrupt or when softrqs are 4959 * reenabled, use netif_rx_ni() which invokes bottomhalf processing 4960 * directly. 4961 */ 4962 if (in_interrupt()) 4963 return netif_rx(skb); 4964 else 4965 return netif_rx_ni(skb); 4966 } 4967 EXPORT_SYMBOL(netif_rx_any_context); 4968 4969 static __latent_entropy void net_tx_action(struct softirq_action *h) 4970 { 4971 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4972 4973 if (sd->completion_queue) { 4974 struct sk_buff *clist; 4975 4976 local_irq_disable(); 4977 clist = sd->completion_queue; 4978 sd->completion_queue = NULL; 4979 local_irq_enable(); 4980 4981 while (clist) { 4982 struct sk_buff *skb = clist; 4983 4984 clist = clist->next; 4985 4986 WARN_ON(refcount_read(&skb->users)); 4987 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4988 trace_consume_skb(skb); 4989 else 4990 trace_kfree_skb(skb, net_tx_action); 4991 4992 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4993 __kfree_skb(skb); 4994 else 4995 __kfree_skb_defer(skb); 4996 } 4997 } 4998 4999 if (sd->output_queue) { 5000 struct Qdisc *head; 5001 5002 local_irq_disable(); 5003 head = sd->output_queue; 5004 sd->output_queue = NULL; 5005 sd->output_queue_tailp = &sd->output_queue; 5006 local_irq_enable(); 5007 5008 rcu_read_lock(); 5009 5010 while (head) { 5011 struct Qdisc *q = head; 5012 spinlock_t *root_lock = NULL; 5013 5014 head = head->next_sched; 5015 5016 /* We need to make sure head->next_sched is read 5017 * before clearing __QDISC_STATE_SCHED 5018 */ 5019 smp_mb__before_atomic(); 5020 5021 if (!(q->flags & TCQ_F_NOLOCK)) { 5022 root_lock = qdisc_lock(q); 5023 spin_lock(root_lock); 5024 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, 5025 &q->state))) { 5026 /* There is a synchronize_net() between 5027 * STATE_DEACTIVATED flag being set and 5028 * qdisc_reset()/some_qdisc_is_busy() in 5029 * dev_deactivate(), so we can safely bail out 5030 * early here to avoid data race between 5031 * qdisc_deactivate() and some_qdisc_is_busy() 5032 * for lockless qdisc. 5033 */ 5034 clear_bit(__QDISC_STATE_SCHED, &q->state); 5035 continue; 5036 } 5037 5038 clear_bit(__QDISC_STATE_SCHED, &q->state); 5039 qdisc_run(q); 5040 if (root_lock) 5041 spin_unlock(root_lock); 5042 } 5043 5044 rcu_read_unlock(); 5045 } 5046 5047 xfrm_dev_backlog(sd); 5048 } 5049 5050 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 5051 /* This hook is defined here for ATM LANE */ 5052 int (*br_fdb_test_addr_hook)(struct net_device *dev, 5053 unsigned char *addr) __read_mostly; 5054 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 5055 #endif 5056 5057 static inline struct sk_buff * 5058 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 5059 struct net_device *orig_dev, bool *another) 5060 { 5061 #ifdef CONFIG_NET_CLS_ACT 5062 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 5063 struct tcf_result cl_res; 5064 5065 /* If there's at least one ingress present somewhere (so 5066 * we get here via enabled static key), remaining devices 5067 * that are not configured with an ingress qdisc will bail 5068 * out here. 5069 */ 5070 if (!miniq) 5071 return skb; 5072 5073 if (*pt_prev) { 5074 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5075 *pt_prev = NULL; 5076 } 5077 5078 qdisc_skb_cb(skb)->pkt_len = skb->len; 5079 qdisc_skb_cb(skb)->mru = 0; 5080 qdisc_skb_cb(skb)->post_ct = false; 5081 skb->tc_at_ingress = 1; 5082 mini_qdisc_bstats_cpu_update(miniq, skb); 5083 5084 switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { 5085 case TC_ACT_OK: 5086 case TC_ACT_RECLASSIFY: 5087 skb->tc_index = TC_H_MIN(cl_res.classid); 5088 break; 5089 case TC_ACT_SHOT: 5090 mini_qdisc_qstats_cpu_drop(miniq); 5091 kfree_skb(skb); 5092 return NULL; 5093 case TC_ACT_STOLEN: 5094 case TC_ACT_QUEUED: 5095 case TC_ACT_TRAP: 5096 consume_skb(skb); 5097 return NULL; 5098 case TC_ACT_REDIRECT: 5099 /* skb_mac_header check was done by cls/act_bpf, so 5100 * we can safely push the L2 header back before 5101 * redirecting to another netdev 5102 */ 5103 __skb_push(skb, skb->mac_len); 5104 if (skb_do_redirect(skb) == -EAGAIN) { 5105 __skb_pull(skb, skb->mac_len); 5106 *another = true; 5107 break; 5108 } 5109 return NULL; 5110 case TC_ACT_CONSUMED: 5111 return NULL; 5112 default: 5113 break; 5114 } 5115 #endif /* CONFIG_NET_CLS_ACT */ 5116 return skb; 5117 } 5118 5119 /** 5120 * netdev_is_rx_handler_busy - check if receive handler is registered 5121 * @dev: device to check 5122 * 5123 * Check if a receive handler is already registered for a given device. 5124 * Return true if there one. 5125 * 5126 * The caller must hold the rtnl_mutex. 5127 */ 5128 bool netdev_is_rx_handler_busy(struct net_device *dev) 5129 { 5130 ASSERT_RTNL(); 5131 return dev && rtnl_dereference(dev->rx_handler); 5132 } 5133 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 5134 5135 /** 5136 * netdev_rx_handler_register - register receive handler 5137 * @dev: device to register a handler for 5138 * @rx_handler: receive handler to register 5139 * @rx_handler_data: data pointer that is used by rx handler 5140 * 5141 * Register a receive handler for a device. This handler will then be 5142 * called from __netif_receive_skb. A negative errno code is returned 5143 * on a failure. 5144 * 5145 * The caller must hold the rtnl_mutex. 5146 * 5147 * For a general description of rx_handler, see enum rx_handler_result. 5148 */ 5149 int netdev_rx_handler_register(struct net_device *dev, 5150 rx_handler_func_t *rx_handler, 5151 void *rx_handler_data) 5152 { 5153 if (netdev_is_rx_handler_busy(dev)) 5154 return -EBUSY; 5155 5156 if (dev->priv_flags & IFF_NO_RX_HANDLER) 5157 return -EINVAL; 5158 5159 /* Note: rx_handler_data must be set before rx_handler */ 5160 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 5161 rcu_assign_pointer(dev->rx_handler, rx_handler); 5162 5163 return 0; 5164 } 5165 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 5166 5167 /** 5168 * netdev_rx_handler_unregister - unregister receive handler 5169 * @dev: device to unregister a handler from 5170 * 5171 * Unregister a receive handler from a device. 5172 * 5173 * The caller must hold the rtnl_mutex. 5174 */ 5175 void netdev_rx_handler_unregister(struct net_device *dev) 5176 { 5177 5178 ASSERT_RTNL(); 5179 RCU_INIT_POINTER(dev->rx_handler, NULL); 5180 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 5181 * section has a guarantee to see a non NULL rx_handler_data 5182 * as well. 5183 */ 5184 synchronize_net(); 5185 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 5186 } 5187 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 5188 5189 /* 5190 * Limit the use of PFMEMALLOC reserves to those protocols that implement 5191 * the special handling of PFMEMALLOC skbs. 5192 */ 5193 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 5194 { 5195 switch (skb->protocol) { 5196 case htons(ETH_P_ARP): 5197 case htons(ETH_P_IP): 5198 case htons(ETH_P_IPV6): 5199 case htons(ETH_P_8021Q): 5200 case htons(ETH_P_8021AD): 5201 return true; 5202 default: 5203 return false; 5204 } 5205 } 5206 5207 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 5208 int *ret, struct net_device *orig_dev) 5209 { 5210 if (nf_hook_ingress_active(skb)) { 5211 int ingress_retval; 5212 5213 if (*pt_prev) { 5214 *ret = deliver_skb(skb, *pt_prev, orig_dev); 5215 *pt_prev = NULL; 5216 } 5217 5218 rcu_read_lock(); 5219 ingress_retval = nf_hook_ingress(skb); 5220 rcu_read_unlock(); 5221 return ingress_retval; 5222 } 5223 return 0; 5224 } 5225 5226 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 5227 struct packet_type **ppt_prev) 5228 { 5229 struct packet_type *ptype, *pt_prev; 5230 rx_handler_func_t *rx_handler; 5231 struct sk_buff *skb = *pskb; 5232 struct net_device *orig_dev; 5233 bool deliver_exact = false; 5234 int ret = NET_RX_DROP; 5235 __be16 type; 5236 5237 net_timestamp_check(!netdev_tstamp_prequeue, skb); 5238 5239 trace_netif_receive_skb(skb); 5240 5241 orig_dev = skb->dev; 5242 5243 skb_reset_network_header(skb); 5244 if (!skb_transport_header_was_set(skb)) 5245 skb_reset_transport_header(skb); 5246 skb_reset_mac_len(skb); 5247 5248 pt_prev = NULL; 5249 5250 another_round: 5251 skb->skb_iif = skb->dev->ifindex; 5252 5253 __this_cpu_inc(softnet_data.processed); 5254 5255 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5256 int ret2; 5257 5258 migrate_disable(); 5259 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5260 migrate_enable(); 5261 5262 if (ret2 != XDP_PASS) { 5263 ret = NET_RX_DROP; 5264 goto out; 5265 } 5266 } 5267 5268 if (eth_type_vlan(skb->protocol)) { 5269 skb = skb_vlan_untag(skb); 5270 if (unlikely(!skb)) 5271 goto out; 5272 } 5273 5274 if (skb_skip_tc_classify(skb)) 5275 goto skip_classify; 5276 5277 if (pfmemalloc) 5278 goto skip_taps; 5279 5280 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5281 if (pt_prev) 5282 ret = deliver_skb(skb, pt_prev, orig_dev); 5283 pt_prev = ptype; 5284 } 5285 5286 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5287 if (pt_prev) 5288 ret = deliver_skb(skb, pt_prev, orig_dev); 5289 pt_prev = ptype; 5290 } 5291 5292 skip_taps: 5293 #ifdef CONFIG_NET_INGRESS 5294 if (static_branch_unlikely(&ingress_needed_key)) { 5295 bool another = false; 5296 5297 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, 5298 &another); 5299 if (another) 5300 goto another_round; 5301 if (!skb) 5302 goto out; 5303 5304 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5305 goto out; 5306 } 5307 #endif 5308 skb_reset_redirect(skb); 5309 skip_classify: 5310 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5311 goto drop; 5312 5313 if (skb_vlan_tag_present(skb)) { 5314 if (pt_prev) { 5315 ret = deliver_skb(skb, pt_prev, orig_dev); 5316 pt_prev = NULL; 5317 } 5318 if (vlan_do_receive(&skb)) 5319 goto another_round; 5320 else if (unlikely(!skb)) 5321 goto out; 5322 } 5323 5324 rx_handler = rcu_dereference(skb->dev->rx_handler); 5325 if (rx_handler) { 5326 if (pt_prev) { 5327 ret = deliver_skb(skb, pt_prev, orig_dev); 5328 pt_prev = NULL; 5329 } 5330 switch (rx_handler(&skb)) { 5331 case RX_HANDLER_CONSUMED: 5332 ret = NET_RX_SUCCESS; 5333 goto out; 5334 case RX_HANDLER_ANOTHER: 5335 goto another_round; 5336 case RX_HANDLER_EXACT: 5337 deliver_exact = true; 5338 break; 5339 case RX_HANDLER_PASS: 5340 break; 5341 default: 5342 BUG(); 5343 } 5344 } 5345 5346 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) { 5347 check_vlan_id: 5348 if (skb_vlan_tag_get_id(skb)) { 5349 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5350 * find vlan device. 5351 */ 5352 skb->pkt_type = PACKET_OTHERHOST; 5353 } else if (eth_type_vlan(skb->protocol)) { 5354 /* Outer header is 802.1P with vlan 0, inner header is 5355 * 802.1Q or 802.1AD and vlan_do_receive() above could 5356 * not find vlan dev for vlan id 0. 5357 */ 5358 __vlan_hwaccel_clear_tag(skb); 5359 skb = skb_vlan_untag(skb); 5360 if (unlikely(!skb)) 5361 goto out; 5362 if (vlan_do_receive(&skb)) 5363 /* After stripping off 802.1P header with vlan 0 5364 * vlan dev is found for inner header. 5365 */ 5366 goto another_round; 5367 else if (unlikely(!skb)) 5368 goto out; 5369 else 5370 /* We have stripped outer 802.1P vlan 0 header. 5371 * But could not find vlan dev. 5372 * check again for vlan id to set OTHERHOST. 5373 */ 5374 goto check_vlan_id; 5375 } 5376 /* Note: we might in the future use prio bits 5377 * and set skb->priority like in vlan_do_receive() 5378 * For the time being, just ignore Priority Code Point 5379 */ 5380 __vlan_hwaccel_clear_tag(skb); 5381 } 5382 5383 type = skb->protocol; 5384 5385 /* deliver only exact match when indicated */ 5386 if (likely(!deliver_exact)) { 5387 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5388 &ptype_base[ntohs(type) & 5389 PTYPE_HASH_MASK]); 5390 } 5391 5392 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5393 &orig_dev->ptype_specific); 5394 5395 if (unlikely(skb->dev != orig_dev)) { 5396 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5397 &skb->dev->ptype_specific); 5398 } 5399 5400 if (pt_prev) { 5401 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5402 goto drop; 5403 *ppt_prev = pt_prev; 5404 } else { 5405 drop: 5406 if (!deliver_exact) 5407 atomic_long_inc(&skb->dev->rx_dropped); 5408 else 5409 atomic_long_inc(&skb->dev->rx_nohandler); 5410 kfree_skb(skb); 5411 /* Jamal, now you will not able to escape explaining 5412 * me how you were going to use this. :-) 5413 */ 5414 ret = NET_RX_DROP; 5415 } 5416 5417 out: 5418 /* The invariant here is that if *ppt_prev is not NULL 5419 * then skb should also be non-NULL. 5420 * 5421 * Apparently *ppt_prev assignment above holds this invariant due to 5422 * skb dereferencing near it. 5423 */ 5424 *pskb = skb; 5425 return ret; 5426 } 5427 5428 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5429 { 5430 struct net_device *orig_dev = skb->dev; 5431 struct packet_type *pt_prev = NULL; 5432 int ret; 5433 5434 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5435 if (pt_prev) 5436 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5437 skb->dev, pt_prev, orig_dev); 5438 return ret; 5439 } 5440 5441 /** 5442 * netif_receive_skb_core - special purpose version of netif_receive_skb 5443 * @skb: buffer to process 5444 * 5445 * More direct receive version of netif_receive_skb(). It should 5446 * only be used by callers that have a need to skip RPS and Generic XDP. 5447 * Caller must also take care of handling if ``(page_is_)pfmemalloc``. 5448 * 5449 * This function may only be called from softirq context and interrupts 5450 * should be enabled. 5451 * 5452 * Return values (usually ignored): 5453 * NET_RX_SUCCESS: no congestion 5454 * NET_RX_DROP: packet was dropped 5455 */ 5456 int netif_receive_skb_core(struct sk_buff *skb) 5457 { 5458 int ret; 5459 5460 rcu_read_lock(); 5461 ret = __netif_receive_skb_one_core(skb, false); 5462 rcu_read_unlock(); 5463 5464 return ret; 5465 } 5466 EXPORT_SYMBOL(netif_receive_skb_core); 5467 5468 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5469 struct packet_type *pt_prev, 5470 struct net_device *orig_dev) 5471 { 5472 struct sk_buff *skb, *next; 5473 5474 if (!pt_prev) 5475 return; 5476 if (list_empty(head)) 5477 return; 5478 if (pt_prev->list_func != NULL) 5479 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5480 ip_list_rcv, head, pt_prev, orig_dev); 5481 else 5482 list_for_each_entry_safe(skb, next, head, list) { 5483 skb_list_del_init(skb); 5484 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5485 } 5486 } 5487 5488 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5489 { 5490 /* Fast-path assumptions: 5491 * - There is no RX handler. 5492 * - Only one packet_type matches. 5493 * If either of these fails, we will end up doing some per-packet 5494 * processing in-line, then handling the 'last ptype' for the whole 5495 * sublist. This can't cause out-of-order delivery to any single ptype, 5496 * because the 'last ptype' must be constant across the sublist, and all 5497 * other ptypes are handled per-packet. 5498 */ 5499 /* Current (common) ptype of sublist */ 5500 struct packet_type *pt_curr = NULL; 5501 /* Current (common) orig_dev of sublist */ 5502 struct net_device *od_curr = NULL; 5503 struct list_head sublist; 5504 struct sk_buff *skb, *next; 5505 5506 INIT_LIST_HEAD(&sublist); 5507 list_for_each_entry_safe(skb, next, head, list) { 5508 struct net_device *orig_dev = skb->dev; 5509 struct packet_type *pt_prev = NULL; 5510 5511 skb_list_del_init(skb); 5512 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5513 if (!pt_prev) 5514 continue; 5515 if (pt_curr != pt_prev || od_curr != orig_dev) { 5516 /* dispatch old sublist */ 5517 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5518 /* start new sublist */ 5519 INIT_LIST_HEAD(&sublist); 5520 pt_curr = pt_prev; 5521 od_curr = orig_dev; 5522 } 5523 list_add_tail(&skb->list, &sublist); 5524 } 5525 5526 /* dispatch final sublist */ 5527 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5528 } 5529 5530 static int __netif_receive_skb(struct sk_buff *skb) 5531 { 5532 int ret; 5533 5534 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5535 unsigned int noreclaim_flag; 5536 5537 /* 5538 * PFMEMALLOC skbs are special, they should 5539 * - be delivered to SOCK_MEMALLOC sockets only 5540 * - stay away from userspace 5541 * - have bounded memory usage 5542 * 5543 * Use PF_MEMALLOC as this saves us from propagating the allocation 5544 * context down to all allocation sites. 5545 */ 5546 noreclaim_flag = memalloc_noreclaim_save(); 5547 ret = __netif_receive_skb_one_core(skb, true); 5548 memalloc_noreclaim_restore(noreclaim_flag); 5549 } else 5550 ret = __netif_receive_skb_one_core(skb, false); 5551 5552 return ret; 5553 } 5554 5555 static void __netif_receive_skb_list(struct list_head *head) 5556 { 5557 unsigned long noreclaim_flag = 0; 5558 struct sk_buff *skb, *next; 5559 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */ 5560 5561 list_for_each_entry_safe(skb, next, head, list) { 5562 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5563 struct list_head sublist; 5564 5565 /* Handle the previous sublist */ 5566 list_cut_before(&sublist, head, &skb->list); 5567 if (!list_empty(&sublist)) 5568 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5569 pfmemalloc = !pfmemalloc; 5570 /* See comments in __netif_receive_skb */ 5571 if (pfmemalloc) 5572 noreclaim_flag = memalloc_noreclaim_save(); 5573 else 5574 memalloc_noreclaim_restore(noreclaim_flag); 5575 } 5576 } 5577 /* Handle the remaining sublist */ 5578 if (!list_empty(head)) 5579 __netif_receive_skb_list_core(head, pfmemalloc); 5580 /* Restore pflags */ 5581 if (pfmemalloc) 5582 memalloc_noreclaim_restore(noreclaim_flag); 5583 } 5584 5585 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5586 { 5587 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5588 struct bpf_prog *new = xdp->prog; 5589 int ret = 0; 5590 5591 switch (xdp->command) { 5592 case XDP_SETUP_PROG: 5593 rcu_assign_pointer(dev->xdp_prog, new); 5594 if (old) 5595 bpf_prog_put(old); 5596 5597 if (old && !new) { 5598 static_branch_dec(&generic_xdp_needed_key); 5599 } else if (new && !old) { 5600 static_branch_inc(&generic_xdp_needed_key); 5601 dev_disable_lro(dev); 5602 dev_disable_gro_hw(dev); 5603 } 5604 break; 5605 5606 default: 5607 ret = -EINVAL; 5608 break; 5609 } 5610 5611 return ret; 5612 } 5613 5614 static int netif_receive_skb_internal(struct sk_buff *skb) 5615 { 5616 int ret; 5617 5618 net_timestamp_check(netdev_tstamp_prequeue, skb); 5619 5620 if (skb_defer_rx_timestamp(skb)) 5621 return NET_RX_SUCCESS; 5622 5623 rcu_read_lock(); 5624 #ifdef CONFIG_RPS 5625 if (static_branch_unlikely(&rps_needed)) { 5626 struct rps_dev_flow voidflow, *rflow = &voidflow; 5627 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5628 5629 if (cpu >= 0) { 5630 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5631 rcu_read_unlock(); 5632 return ret; 5633 } 5634 } 5635 #endif 5636 ret = __netif_receive_skb(skb); 5637 rcu_read_unlock(); 5638 return ret; 5639 } 5640 5641 static void netif_receive_skb_list_internal(struct list_head *head) 5642 { 5643 struct sk_buff *skb, *next; 5644 struct list_head sublist; 5645 5646 INIT_LIST_HEAD(&sublist); 5647 list_for_each_entry_safe(skb, next, head, list) { 5648 net_timestamp_check(netdev_tstamp_prequeue, skb); 5649 skb_list_del_init(skb); 5650 if (!skb_defer_rx_timestamp(skb)) 5651 list_add_tail(&skb->list, &sublist); 5652 } 5653 list_splice_init(&sublist, head); 5654 5655 rcu_read_lock(); 5656 #ifdef CONFIG_RPS 5657 if (static_branch_unlikely(&rps_needed)) { 5658 list_for_each_entry_safe(skb, next, head, list) { 5659 struct rps_dev_flow voidflow, *rflow = &voidflow; 5660 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5661 5662 if (cpu >= 0) { 5663 /* Will be handled, remove from list */ 5664 skb_list_del_init(skb); 5665 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5666 } 5667 } 5668 } 5669 #endif 5670 __netif_receive_skb_list(head); 5671 rcu_read_unlock(); 5672 } 5673 5674 /** 5675 * netif_receive_skb - process receive buffer from network 5676 * @skb: buffer to process 5677 * 5678 * netif_receive_skb() is the main receive data processing function. 5679 * It always succeeds. The buffer may be dropped during processing 5680 * for congestion control or by the protocol layers. 5681 * 5682 * This function may only be called from softirq context and interrupts 5683 * should be enabled. 5684 * 5685 * Return values (usually ignored): 5686 * NET_RX_SUCCESS: no congestion 5687 * NET_RX_DROP: packet was dropped 5688 */ 5689 int netif_receive_skb(struct sk_buff *skb) 5690 { 5691 int ret; 5692 5693 trace_netif_receive_skb_entry(skb); 5694 5695 ret = netif_receive_skb_internal(skb); 5696 trace_netif_receive_skb_exit(ret); 5697 5698 return ret; 5699 } 5700 EXPORT_SYMBOL(netif_receive_skb); 5701 5702 /** 5703 * netif_receive_skb_list - process many receive buffers from network 5704 * @head: list of skbs to process. 5705 * 5706 * Since return value of netif_receive_skb() is normally ignored, and 5707 * wouldn't be meaningful for a list, this function returns void. 5708 * 5709 * This function may only be called from softirq context and interrupts 5710 * should be enabled. 5711 */ 5712 void netif_receive_skb_list(struct list_head *head) 5713 { 5714 struct sk_buff *skb; 5715 5716 if (list_empty(head)) 5717 return; 5718 if (trace_netif_receive_skb_list_entry_enabled()) { 5719 list_for_each_entry(skb, head, list) 5720 trace_netif_receive_skb_list_entry(skb); 5721 } 5722 netif_receive_skb_list_internal(head); 5723 trace_netif_receive_skb_list_exit(0); 5724 } 5725 EXPORT_SYMBOL(netif_receive_skb_list); 5726 5727 static DEFINE_PER_CPU(struct work_struct, flush_works); 5728 5729 /* Network device is going away, flush any packets still pending */ 5730 static void flush_backlog(struct work_struct *work) 5731 { 5732 struct sk_buff *skb, *tmp; 5733 struct softnet_data *sd; 5734 5735 local_bh_disable(); 5736 sd = this_cpu_ptr(&softnet_data); 5737 5738 local_irq_disable(); 5739 rps_lock(sd); 5740 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5741 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5742 __skb_unlink(skb, &sd->input_pkt_queue); 5743 dev_kfree_skb_irq(skb); 5744 input_queue_head_incr(sd); 5745 } 5746 } 5747 rps_unlock(sd); 5748 local_irq_enable(); 5749 5750 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5751 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5752 __skb_unlink(skb, &sd->process_queue); 5753 kfree_skb(skb); 5754 input_queue_head_incr(sd); 5755 } 5756 } 5757 local_bh_enable(); 5758 } 5759 5760 static bool flush_required(int cpu) 5761 { 5762 #if IS_ENABLED(CONFIG_RPS) 5763 struct softnet_data *sd = &per_cpu(softnet_data, cpu); 5764 bool do_flush; 5765 5766 local_irq_disable(); 5767 rps_lock(sd); 5768 5769 /* as insertion into process_queue happens with the rps lock held, 5770 * process_queue access may race only with dequeue 5771 */ 5772 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || 5773 !skb_queue_empty_lockless(&sd->process_queue); 5774 rps_unlock(sd); 5775 local_irq_enable(); 5776 5777 return do_flush; 5778 #endif 5779 /* without RPS we can't safely check input_pkt_queue: during a 5780 * concurrent remote skb_queue_splice() we can detect as empty both 5781 * input_pkt_queue and process_queue even if the latter could end-up 5782 * containing a lot of packets. 5783 */ 5784 return true; 5785 } 5786 5787 static void flush_all_backlogs(void) 5788 { 5789 static cpumask_t flush_cpus; 5790 unsigned int cpu; 5791 5792 /* since we are under rtnl lock protection we can use static data 5793 * for the cpumask and avoid allocating on stack the possibly 5794 * large mask 5795 */ 5796 ASSERT_RTNL(); 5797 5798 cpus_read_lock(); 5799 5800 cpumask_clear(&flush_cpus); 5801 for_each_online_cpu(cpu) { 5802 if (flush_required(cpu)) { 5803 queue_work_on(cpu, system_highpri_wq, 5804 per_cpu_ptr(&flush_works, cpu)); 5805 cpumask_set_cpu(cpu, &flush_cpus); 5806 } 5807 } 5808 5809 /* we can have in flight packet[s] on the cpus we are not flushing, 5810 * synchronize_net() in unregister_netdevice_many() will take care of 5811 * them 5812 */ 5813 for_each_cpu(cpu, &flush_cpus) 5814 flush_work(per_cpu_ptr(&flush_works, cpu)); 5815 5816 cpus_read_unlock(); 5817 } 5818 5819 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ 5820 static void gro_normal_list(struct napi_struct *napi) 5821 { 5822 if (!napi->rx_count) 5823 return; 5824 netif_receive_skb_list_internal(&napi->rx_list); 5825 INIT_LIST_HEAD(&napi->rx_list); 5826 napi->rx_count = 0; 5827 } 5828 5829 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, 5830 * pass the whole batch up to the stack. 5831 */ 5832 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs) 5833 { 5834 list_add_tail(&skb->list, &napi->rx_list); 5835 napi->rx_count += segs; 5836 if (napi->rx_count >= gro_normal_batch) 5837 gro_normal_list(napi); 5838 } 5839 5840 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) 5841 { 5842 struct packet_offload *ptype; 5843 __be16 type = skb->protocol; 5844 struct list_head *head = &offload_base; 5845 int err = -ENOENT; 5846 5847 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 5848 5849 if (NAPI_GRO_CB(skb)->count == 1) { 5850 skb_shinfo(skb)->gso_size = 0; 5851 goto out; 5852 } 5853 5854 rcu_read_lock(); 5855 list_for_each_entry_rcu(ptype, head, list) { 5856 if (ptype->type != type || !ptype->callbacks.gro_complete) 5857 continue; 5858 5859 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 5860 ipv6_gro_complete, inet_gro_complete, 5861 skb, 0); 5862 break; 5863 } 5864 rcu_read_unlock(); 5865 5866 if (err) { 5867 WARN_ON(&ptype->list == head); 5868 kfree_skb(skb); 5869 return NET_RX_SUCCESS; 5870 } 5871 5872 out: 5873 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); 5874 return NET_RX_SUCCESS; 5875 } 5876 5877 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, 5878 bool flush_old) 5879 { 5880 struct list_head *head = &napi->gro_hash[index].list; 5881 struct sk_buff *skb, *p; 5882 5883 list_for_each_entry_safe_reverse(skb, p, head, list) { 5884 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 5885 return; 5886 skb_list_del_init(skb); 5887 napi_gro_complete(napi, skb); 5888 napi->gro_hash[index].count--; 5889 } 5890 5891 if (!napi->gro_hash[index].count) 5892 __clear_bit(index, &napi->gro_bitmask); 5893 } 5894 5895 /* napi->gro_hash[].list contains packets ordered by age. 5896 * youngest packets at the head of it. 5897 * Complete skbs in reverse order to reduce latencies. 5898 */ 5899 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 5900 { 5901 unsigned long bitmask = napi->gro_bitmask; 5902 unsigned int i, base = ~0U; 5903 5904 while ((i = ffs(bitmask)) != 0) { 5905 bitmask >>= i; 5906 base += i; 5907 __napi_gro_flush_chain(napi, base, flush_old); 5908 } 5909 } 5910 EXPORT_SYMBOL(napi_gro_flush); 5911 5912 static void gro_list_prepare(const struct list_head *head, 5913 const struct sk_buff *skb) 5914 { 5915 unsigned int maclen = skb->dev->hard_header_len; 5916 u32 hash = skb_get_hash_raw(skb); 5917 struct sk_buff *p; 5918 5919 list_for_each_entry(p, head, list) { 5920 unsigned long diffs; 5921 5922 NAPI_GRO_CB(p)->flush = 0; 5923 5924 if (hash != skb_get_hash_raw(p)) { 5925 NAPI_GRO_CB(p)->same_flow = 0; 5926 continue; 5927 } 5928 5929 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 5930 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); 5931 if (skb_vlan_tag_present(p)) 5932 diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); 5933 diffs |= skb_metadata_differs(p, skb); 5934 if (maclen == ETH_HLEN) 5935 diffs |= compare_ether_header(skb_mac_header(p), 5936 skb_mac_header(skb)); 5937 else if (!diffs) 5938 diffs = memcmp(skb_mac_header(p), 5939 skb_mac_header(skb), 5940 maclen); 5941 5942 /* in most common scenarions 'slow_gro' is 0 5943 * otherwise we are already on some slower paths 5944 * either skip all the infrequent tests altogether or 5945 * avoid trying too hard to skip each of them individually 5946 */ 5947 if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { 5948 #if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 5949 struct tc_skb_ext *skb_ext; 5950 struct tc_skb_ext *p_ext; 5951 #endif 5952 5953 diffs |= p->sk != skb->sk; 5954 diffs |= skb_metadata_dst_cmp(p, skb); 5955 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); 5956 5957 #if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 5958 skb_ext = skb_ext_find(skb, TC_SKB_EXT); 5959 p_ext = skb_ext_find(p, TC_SKB_EXT); 5960 5961 diffs |= (!!p_ext) ^ (!!skb_ext); 5962 if (!diffs && unlikely(skb_ext)) 5963 diffs |= p_ext->chain ^ skb_ext->chain; 5964 #endif 5965 } 5966 5967 NAPI_GRO_CB(p)->same_flow = !diffs; 5968 } 5969 } 5970 5971 static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) 5972 { 5973 const struct skb_shared_info *pinfo = skb_shinfo(skb); 5974 const skb_frag_t *frag0 = &pinfo->frags[0]; 5975 5976 NAPI_GRO_CB(skb)->data_offset = 0; 5977 NAPI_GRO_CB(skb)->frag0 = NULL; 5978 NAPI_GRO_CB(skb)->frag0_len = 0; 5979 5980 if (!skb_headlen(skb) && pinfo->nr_frags && 5981 !PageHighMem(skb_frag_page(frag0)) && 5982 (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) { 5983 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 5984 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, 5985 skb_frag_size(frag0), 5986 skb->end - skb->tail); 5987 } 5988 } 5989 5990 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 5991 { 5992 struct skb_shared_info *pinfo = skb_shinfo(skb); 5993 5994 BUG_ON(skb->end - skb->tail < grow); 5995 5996 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 5997 5998 skb->data_len -= grow; 5999 skb->tail += grow; 6000 6001 skb_frag_off_add(&pinfo->frags[0], grow); 6002 skb_frag_size_sub(&pinfo->frags[0], grow); 6003 6004 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 6005 skb_frag_unref(skb, 0); 6006 memmove(pinfo->frags, pinfo->frags + 1, 6007 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 6008 } 6009 } 6010 6011 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) 6012 { 6013 struct sk_buff *oldest; 6014 6015 oldest = list_last_entry(head, struct sk_buff, list); 6016 6017 /* We are called with head length >= MAX_GRO_SKBS, so this is 6018 * impossible. 6019 */ 6020 if (WARN_ON_ONCE(!oldest)) 6021 return; 6022 6023 /* Do not adjust napi->gro_hash[].count, caller is adding a new 6024 * SKB to the chain. 6025 */ 6026 skb_list_del_init(oldest); 6027 napi_gro_complete(napi, oldest); 6028 } 6029 6030 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 6031 { 6032 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 6033 struct gro_list *gro_list = &napi->gro_hash[bucket]; 6034 struct list_head *head = &offload_base; 6035 struct packet_offload *ptype; 6036 __be16 type = skb->protocol; 6037 struct sk_buff *pp = NULL; 6038 enum gro_result ret; 6039 int same_flow; 6040 int grow; 6041 6042 if (netif_elide_gro(skb->dev)) 6043 goto normal; 6044 6045 gro_list_prepare(&gro_list->list, skb); 6046 6047 rcu_read_lock(); 6048 list_for_each_entry_rcu(ptype, head, list) { 6049 if (ptype->type != type || !ptype->callbacks.gro_receive) 6050 continue; 6051 6052 skb_set_network_header(skb, skb_gro_offset(skb)); 6053 skb_reset_mac_len(skb); 6054 NAPI_GRO_CB(skb)->same_flow = 0; 6055 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); 6056 NAPI_GRO_CB(skb)->free = 0; 6057 NAPI_GRO_CB(skb)->encap_mark = 0; 6058 NAPI_GRO_CB(skb)->recursion_counter = 0; 6059 NAPI_GRO_CB(skb)->is_fou = 0; 6060 NAPI_GRO_CB(skb)->is_atomic = 1; 6061 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 6062 6063 /* Setup for GRO checksum validation */ 6064 switch (skb->ip_summed) { 6065 case CHECKSUM_COMPLETE: 6066 NAPI_GRO_CB(skb)->csum = skb->csum; 6067 NAPI_GRO_CB(skb)->csum_valid = 1; 6068 NAPI_GRO_CB(skb)->csum_cnt = 0; 6069 break; 6070 case CHECKSUM_UNNECESSARY: 6071 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; 6072 NAPI_GRO_CB(skb)->csum_valid = 0; 6073 break; 6074 default: 6075 NAPI_GRO_CB(skb)->csum_cnt = 0; 6076 NAPI_GRO_CB(skb)->csum_valid = 0; 6077 } 6078 6079 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, 6080 ipv6_gro_receive, inet_gro_receive, 6081 &gro_list->list, skb); 6082 break; 6083 } 6084 rcu_read_unlock(); 6085 6086 if (&ptype->list == head) 6087 goto normal; 6088 6089 if (PTR_ERR(pp) == -EINPROGRESS) { 6090 ret = GRO_CONSUMED; 6091 goto ok; 6092 } 6093 6094 same_flow = NAPI_GRO_CB(skb)->same_flow; 6095 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; 6096 6097 if (pp) { 6098 skb_list_del_init(pp); 6099 napi_gro_complete(napi, pp); 6100 gro_list->count--; 6101 } 6102 6103 if (same_flow) 6104 goto ok; 6105 6106 if (NAPI_GRO_CB(skb)->flush) 6107 goto normal; 6108 6109 if (unlikely(gro_list->count >= MAX_GRO_SKBS)) 6110 gro_flush_oldest(napi, &gro_list->list); 6111 else 6112 gro_list->count++; 6113 6114 NAPI_GRO_CB(skb)->count = 1; 6115 NAPI_GRO_CB(skb)->age = jiffies; 6116 NAPI_GRO_CB(skb)->last = skb; 6117 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 6118 list_add(&skb->list, &gro_list->list); 6119 ret = GRO_HELD; 6120 6121 pull: 6122 grow = skb_gro_offset(skb) - skb_headlen(skb); 6123 if (grow > 0) 6124 gro_pull_from_frag0(skb, grow); 6125 ok: 6126 if (gro_list->count) { 6127 if (!test_bit(bucket, &napi->gro_bitmask)) 6128 __set_bit(bucket, &napi->gro_bitmask); 6129 } else if (test_bit(bucket, &napi->gro_bitmask)) { 6130 __clear_bit(bucket, &napi->gro_bitmask); 6131 } 6132 6133 return ret; 6134 6135 normal: 6136 ret = GRO_NORMAL; 6137 goto pull; 6138 } 6139 6140 struct packet_offload *gro_find_receive_by_type(__be16 type) 6141 { 6142 struct list_head *offload_head = &offload_base; 6143 struct packet_offload *ptype; 6144 6145 list_for_each_entry_rcu(ptype, offload_head, list) { 6146 if (ptype->type != type || !ptype->callbacks.gro_receive) 6147 continue; 6148 return ptype; 6149 } 6150 return NULL; 6151 } 6152 EXPORT_SYMBOL(gro_find_receive_by_type); 6153 6154 struct packet_offload *gro_find_complete_by_type(__be16 type) 6155 { 6156 struct list_head *offload_head = &offload_base; 6157 struct packet_offload *ptype; 6158 6159 list_for_each_entry_rcu(ptype, offload_head, list) { 6160 if (ptype->type != type || !ptype->callbacks.gro_complete) 6161 continue; 6162 return ptype; 6163 } 6164 return NULL; 6165 } 6166 EXPORT_SYMBOL(gro_find_complete_by_type); 6167 6168 static gro_result_t napi_skb_finish(struct napi_struct *napi, 6169 struct sk_buff *skb, 6170 gro_result_t ret) 6171 { 6172 switch (ret) { 6173 case GRO_NORMAL: 6174 gro_normal_one(napi, skb, 1); 6175 break; 6176 6177 case GRO_MERGED_FREE: 6178 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 6179 napi_skb_free_stolen_head(skb); 6180 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 6181 __kfree_skb(skb); 6182 else 6183 __kfree_skb_defer(skb); 6184 break; 6185 6186 case GRO_HELD: 6187 case GRO_MERGED: 6188 case GRO_CONSUMED: 6189 break; 6190 } 6191 6192 return ret; 6193 } 6194 6195 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 6196 { 6197 gro_result_t ret; 6198 6199 skb_mark_napi_id(skb, napi); 6200 trace_napi_gro_receive_entry(skb); 6201 6202 skb_gro_reset_offset(skb, 0); 6203 6204 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); 6205 trace_napi_gro_receive_exit(ret); 6206 6207 return ret; 6208 } 6209 EXPORT_SYMBOL(napi_gro_receive); 6210 6211 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 6212 { 6213 if (unlikely(skb->pfmemalloc)) { 6214 consume_skb(skb); 6215 return; 6216 } 6217 __skb_pull(skb, skb_headlen(skb)); 6218 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 6219 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 6220 __vlan_hwaccel_clear_tag(skb); 6221 skb->dev = napi->dev; 6222 skb->skb_iif = 0; 6223 6224 /* eth_type_trans() assumes pkt_type is PACKET_HOST */ 6225 skb->pkt_type = PACKET_HOST; 6226 6227 skb->encapsulation = 0; 6228 skb_shinfo(skb)->gso_type = 0; 6229 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6230 if (unlikely(skb->slow_gro)) { 6231 skb_orphan(skb); 6232 skb_ext_reset(skb); 6233 nf_reset_ct(skb); 6234 skb->slow_gro = 0; 6235 } 6236 6237 napi->skb = skb; 6238 } 6239 6240 struct sk_buff *napi_get_frags(struct napi_struct *napi) 6241 { 6242 struct sk_buff *skb = napi->skb; 6243 6244 if (!skb) { 6245 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); 6246 if (skb) { 6247 napi->skb = skb; 6248 skb_mark_napi_id(skb, napi); 6249 } 6250 } 6251 return skb; 6252 } 6253 EXPORT_SYMBOL(napi_get_frags); 6254 6255 static gro_result_t napi_frags_finish(struct napi_struct *napi, 6256 struct sk_buff *skb, 6257 gro_result_t ret) 6258 { 6259 switch (ret) { 6260 case GRO_NORMAL: 6261 case GRO_HELD: 6262 __skb_push(skb, ETH_HLEN); 6263 skb->protocol = eth_type_trans(skb, skb->dev); 6264 if (ret == GRO_NORMAL) 6265 gro_normal_one(napi, skb, 1); 6266 break; 6267 6268 case GRO_MERGED_FREE: 6269 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 6270 napi_skb_free_stolen_head(skb); 6271 else 6272 napi_reuse_skb(napi, skb); 6273 break; 6274 6275 case GRO_MERGED: 6276 case GRO_CONSUMED: 6277 break; 6278 } 6279 6280 return ret; 6281 } 6282 6283 /* Upper GRO stack assumes network header starts at gro_offset=0 6284 * Drivers could call both napi_gro_frags() and napi_gro_receive() 6285 * We copy ethernet header into skb->data to have a common layout. 6286 */ 6287 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 6288 { 6289 struct sk_buff *skb = napi->skb; 6290 const struct ethhdr *eth; 6291 unsigned int hlen = sizeof(*eth); 6292 6293 napi->skb = NULL; 6294 6295 skb_reset_mac_header(skb); 6296 skb_gro_reset_offset(skb, hlen); 6297 6298 if (unlikely(skb_gro_header_hard(skb, hlen))) { 6299 eth = skb_gro_header_slow(skb, hlen, 0); 6300 if (unlikely(!eth)) { 6301 net_warn_ratelimited("%s: dropping impossible skb from %s\n", 6302 __func__, napi->dev->name); 6303 napi_reuse_skb(napi, skb); 6304 return NULL; 6305 } 6306 } else { 6307 eth = (const struct ethhdr *)skb->data; 6308 gro_pull_from_frag0(skb, hlen); 6309 NAPI_GRO_CB(skb)->frag0 += hlen; 6310 NAPI_GRO_CB(skb)->frag0_len -= hlen; 6311 } 6312 __skb_pull(skb, hlen); 6313 6314 /* 6315 * This works because the only protocols we care about don't require 6316 * special handling. 6317 * We'll fix it up properly in napi_frags_finish() 6318 */ 6319 skb->protocol = eth->h_proto; 6320 6321 return skb; 6322 } 6323 6324 gro_result_t napi_gro_frags(struct napi_struct *napi) 6325 { 6326 gro_result_t ret; 6327 struct sk_buff *skb = napi_frags_skb(napi); 6328 6329 trace_napi_gro_frags_entry(skb); 6330 6331 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 6332 trace_napi_gro_frags_exit(ret); 6333 6334 return ret; 6335 } 6336 EXPORT_SYMBOL(napi_gro_frags); 6337 6338 /* Compute the checksum from gro_offset and return the folded value 6339 * after adding in any pseudo checksum. 6340 */ 6341 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) 6342 { 6343 __wsum wsum; 6344 __sum16 sum; 6345 6346 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); 6347 6348 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ 6349 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); 6350 /* See comments in __skb_checksum_complete(). */ 6351 if (likely(!sum)) { 6352 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 6353 !skb->csum_complete_sw) 6354 netdev_rx_csum_fault(skb->dev, skb); 6355 } 6356 6357 NAPI_GRO_CB(skb)->csum = wsum; 6358 NAPI_GRO_CB(skb)->csum_valid = 1; 6359 6360 return sum; 6361 } 6362 EXPORT_SYMBOL(__skb_gro_checksum_complete); 6363 6364 static void net_rps_send_ipi(struct softnet_data *remsd) 6365 { 6366 #ifdef CONFIG_RPS 6367 while (remsd) { 6368 struct softnet_data *next = remsd->rps_ipi_next; 6369 6370 if (cpu_online(remsd->cpu)) 6371 smp_call_function_single_async(remsd->cpu, &remsd->csd); 6372 remsd = next; 6373 } 6374 #endif 6375 } 6376 6377 /* 6378 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 6379 * Note: called with local irq disabled, but exits with local irq enabled. 6380 */ 6381 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 6382 { 6383 #ifdef CONFIG_RPS 6384 struct softnet_data *remsd = sd->rps_ipi_list; 6385 6386 if (remsd) { 6387 sd->rps_ipi_list = NULL; 6388 6389 local_irq_enable(); 6390 6391 /* Send pending IPI's to kick RPS processing on remote cpus. */ 6392 net_rps_send_ipi(remsd); 6393 } else 6394 #endif 6395 local_irq_enable(); 6396 } 6397 6398 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 6399 { 6400 #ifdef CONFIG_RPS 6401 return sd->rps_ipi_list != NULL; 6402 #else 6403 return false; 6404 #endif 6405 } 6406 6407 static int process_backlog(struct napi_struct *napi, int quota) 6408 { 6409 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 6410 bool again = true; 6411 int work = 0; 6412 6413 /* Check if we have pending ipi, its better to send them now, 6414 * not waiting net_rx_action() end. 6415 */ 6416 if (sd_has_rps_ipi_waiting(sd)) { 6417 local_irq_disable(); 6418 net_rps_action_and_irq_enable(sd); 6419 } 6420 6421 napi->weight = dev_rx_weight; 6422 while (again) { 6423 struct sk_buff *skb; 6424 6425 while ((skb = __skb_dequeue(&sd->process_queue))) { 6426 rcu_read_lock(); 6427 __netif_receive_skb(skb); 6428 rcu_read_unlock(); 6429 input_queue_head_incr(sd); 6430 if (++work >= quota) 6431 return work; 6432 6433 } 6434 6435 local_irq_disable(); 6436 rps_lock(sd); 6437 if (skb_queue_empty(&sd->input_pkt_queue)) { 6438 /* 6439 * Inline a custom version of __napi_complete(). 6440 * only current cpu owns and manipulates this napi, 6441 * and NAPI_STATE_SCHED is the only possible flag set 6442 * on backlog. 6443 * We can use a plain write instead of clear_bit(), 6444 * and we dont need an smp_mb() memory barrier. 6445 */ 6446 napi->state = 0; 6447 again = false; 6448 } else { 6449 skb_queue_splice_tail_init(&sd->input_pkt_queue, 6450 &sd->process_queue); 6451 } 6452 rps_unlock(sd); 6453 local_irq_enable(); 6454 } 6455 6456 return work; 6457 } 6458 6459 /** 6460 * __napi_schedule - schedule for receive 6461 * @n: entry to schedule 6462 * 6463 * The entry's receive function will be scheduled to run. 6464 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 6465 */ 6466 void __napi_schedule(struct napi_struct *n) 6467 { 6468 unsigned long flags; 6469 6470 local_irq_save(flags); 6471 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6472 local_irq_restore(flags); 6473 } 6474 EXPORT_SYMBOL(__napi_schedule); 6475 6476 /** 6477 * napi_schedule_prep - check if napi can be scheduled 6478 * @n: napi context 6479 * 6480 * Test if NAPI routine is already running, and if not mark 6481 * it as running. This is used as a condition variable to 6482 * insure only one NAPI poll instance runs. We also make 6483 * sure there is no pending NAPI disable. 6484 */ 6485 bool napi_schedule_prep(struct napi_struct *n) 6486 { 6487 unsigned long val, new; 6488 6489 do { 6490 val = READ_ONCE(n->state); 6491 if (unlikely(val & NAPIF_STATE_DISABLE)) 6492 return false; 6493 new = val | NAPIF_STATE_SCHED; 6494 6495 /* Sets STATE_MISSED bit if STATE_SCHED was already set 6496 * This was suggested by Alexander Duyck, as compiler 6497 * emits better code than : 6498 * if (val & NAPIF_STATE_SCHED) 6499 * new |= NAPIF_STATE_MISSED; 6500 */ 6501 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6502 NAPIF_STATE_MISSED; 6503 } while (cmpxchg(&n->state, val, new) != val); 6504 6505 return !(val & NAPIF_STATE_SCHED); 6506 } 6507 EXPORT_SYMBOL(napi_schedule_prep); 6508 6509 /** 6510 * __napi_schedule_irqoff - schedule for receive 6511 * @n: entry to schedule 6512 * 6513 * Variant of __napi_schedule() assuming hard irqs are masked. 6514 * 6515 * On PREEMPT_RT enabled kernels this maps to __napi_schedule() 6516 * because the interrupt disabled assumption might not be true 6517 * due to force-threaded interrupts and spinlock substitution. 6518 */ 6519 void __napi_schedule_irqoff(struct napi_struct *n) 6520 { 6521 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 6522 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6523 else 6524 __napi_schedule(n); 6525 } 6526 EXPORT_SYMBOL(__napi_schedule_irqoff); 6527 6528 bool napi_complete_done(struct napi_struct *n, int work_done) 6529 { 6530 unsigned long flags, val, new, timeout = 0; 6531 bool ret = true; 6532 6533 /* 6534 * 1) Don't let napi dequeue from the cpu poll list 6535 * just in case its running on a different cpu. 6536 * 2) If we are busy polling, do nothing here, we have 6537 * the guarantee we will be called later. 6538 */ 6539 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6540 NAPIF_STATE_IN_BUSY_POLL))) 6541 return false; 6542 6543 if (work_done) { 6544 if (n->gro_bitmask) 6545 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6546 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs); 6547 } 6548 if (n->defer_hard_irqs_count > 0) { 6549 n->defer_hard_irqs_count--; 6550 timeout = READ_ONCE(n->dev->gro_flush_timeout); 6551 if (timeout) 6552 ret = false; 6553 } 6554 if (n->gro_bitmask) { 6555 /* When the NAPI instance uses a timeout and keeps postponing 6556 * it, we need to bound somehow the time packets are kept in 6557 * the GRO layer 6558 */ 6559 napi_gro_flush(n, !!timeout); 6560 } 6561 6562 gro_normal_list(n); 6563 6564 if (unlikely(!list_empty(&n->poll_list))) { 6565 /* If n->poll_list is not empty, we need to mask irqs */ 6566 local_irq_save(flags); 6567 list_del_init(&n->poll_list); 6568 local_irq_restore(flags); 6569 } 6570 6571 do { 6572 val = READ_ONCE(n->state); 6573 6574 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6575 6576 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6577 NAPIF_STATE_SCHED_THREADED | 6578 NAPIF_STATE_PREFER_BUSY_POLL); 6579 6580 /* If STATE_MISSED was set, leave STATE_SCHED set, 6581 * because we will call napi->poll() one more time. 6582 * This C code was suggested by Alexander Duyck to help gcc. 6583 */ 6584 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6585 NAPIF_STATE_SCHED; 6586 } while (cmpxchg(&n->state, val, new) != val); 6587 6588 if (unlikely(val & NAPIF_STATE_MISSED)) { 6589 __napi_schedule(n); 6590 return false; 6591 } 6592 6593 if (timeout) 6594 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6595 HRTIMER_MODE_REL_PINNED); 6596 return ret; 6597 } 6598 EXPORT_SYMBOL(napi_complete_done); 6599 6600 /* must be called under rcu_read_lock(), as we dont take a reference */ 6601 static struct napi_struct *napi_by_id(unsigned int napi_id) 6602 { 6603 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6604 struct napi_struct *napi; 6605 6606 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6607 if (napi->napi_id == napi_id) 6608 return napi; 6609 6610 return NULL; 6611 } 6612 6613 #if defined(CONFIG_NET_RX_BUSY_POLL) 6614 6615 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) 6616 { 6617 if (!skip_schedule) { 6618 gro_normal_list(napi); 6619 __napi_schedule(napi); 6620 return; 6621 } 6622 6623 if (napi->gro_bitmask) { 6624 /* flush too old packets 6625 * If HZ < 1000, flush all packets. 6626 */ 6627 napi_gro_flush(napi, HZ >= 1000); 6628 } 6629 6630 gro_normal_list(napi); 6631 clear_bit(NAPI_STATE_SCHED, &napi->state); 6632 } 6633 6634 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll, 6635 u16 budget) 6636 { 6637 bool skip_schedule = false; 6638 unsigned long timeout; 6639 int rc; 6640 6641 /* Busy polling means there is a high chance device driver hard irq 6642 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6643 * set in napi_schedule_prep(). 6644 * Since we are about to call napi->poll() once more, we can safely 6645 * clear NAPI_STATE_MISSED. 6646 * 6647 * Note: x86 could use a single "lock and ..." instruction 6648 * to perform these two clear_bit() 6649 */ 6650 clear_bit(NAPI_STATE_MISSED, &napi->state); 6651 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6652 6653 local_bh_disable(); 6654 6655 if (prefer_busy_poll) { 6656 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); 6657 timeout = READ_ONCE(napi->dev->gro_flush_timeout); 6658 if (napi->defer_hard_irqs_count && timeout) { 6659 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); 6660 skip_schedule = true; 6661 } 6662 } 6663 6664 /* All we really want here is to re-enable device interrupts. 6665 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6666 */ 6667 rc = napi->poll(napi, budget); 6668 /* We can't gro_normal_list() here, because napi->poll() might have 6669 * rearmed the napi (napi_complete_done()) in which case it could 6670 * already be running on another CPU. 6671 */ 6672 trace_napi_poll(napi, rc, budget); 6673 netpoll_poll_unlock(have_poll_lock); 6674 if (rc == budget) 6675 __busy_poll_stop(napi, skip_schedule); 6676 local_bh_enable(); 6677 } 6678 6679 void napi_busy_loop(unsigned int napi_id, 6680 bool (*loop_end)(void *, unsigned long), 6681 void *loop_end_arg, bool prefer_busy_poll, u16 budget) 6682 { 6683 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6684 int (*napi_poll)(struct napi_struct *napi, int budget); 6685 void *have_poll_lock = NULL; 6686 struct napi_struct *napi; 6687 6688 restart: 6689 napi_poll = NULL; 6690 6691 rcu_read_lock(); 6692 6693 napi = napi_by_id(napi_id); 6694 if (!napi) 6695 goto out; 6696 6697 preempt_disable(); 6698 for (;;) { 6699 int work = 0; 6700 6701 local_bh_disable(); 6702 if (!napi_poll) { 6703 unsigned long val = READ_ONCE(napi->state); 6704 6705 /* If multiple threads are competing for this napi, 6706 * we avoid dirtying napi->state as much as we can. 6707 */ 6708 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6709 NAPIF_STATE_IN_BUSY_POLL)) { 6710 if (prefer_busy_poll) 6711 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6712 goto count; 6713 } 6714 if (cmpxchg(&napi->state, val, 6715 val | NAPIF_STATE_IN_BUSY_POLL | 6716 NAPIF_STATE_SCHED) != val) { 6717 if (prefer_busy_poll) 6718 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6719 goto count; 6720 } 6721 have_poll_lock = netpoll_poll_lock(napi); 6722 napi_poll = napi->poll; 6723 } 6724 work = napi_poll(napi, budget); 6725 trace_napi_poll(napi, work, budget); 6726 gro_normal_list(napi); 6727 count: 6728 if (work > 0) 6729 __NET_ADD_STATS(dev_net(napi->dev), 6730 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6731 local_bh_enable(); 6732 6733 if (!loop_end || loop_end(loop_end_arg, start_time)) 6734 break; 6735 6736 if (unlikely(need_resched())) { 6737 if (napi_poll) 6738 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6739 preempt_enable(); 6740 rcu_read_unlock(); 6741 cond_resched(); 6742 if (loop_end(loop_end_arg, start_time)) 6743 return; 6744 goto restart; 6745 } 6746 cpu_relax(); 6747 } 6748 if (napi_poll) 6749 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); 6750 preempt_enable(); 6751 out: 6752 rcu_read_unlock(); 6753 } 6754 EXPORT_SYMBOL(napi_busy_loop); 6755 6756 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6757 6758 static void napi_hash_add(struct napi_struct *napi) 6759 { 6760 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) 6761 return; 6762 6763 spin_lock(&napi_hash_lock); 6764 6765 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6766 do { 6767 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6768 napi_gen_id = MIN_NAPI_ID; 6769 } while (napi_by_id(napi_gen_id)); 6770 napi->napi_id = napi_gen_id; 6771 6772 hlist_add_head_rcu(&napi->napi_hash_node, 6773 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6774 6775 spin_unlock(&napi_hash_lock); 6776 } 6777 6778 /* Warning : caller is responsible to make sure rcu grace period 6779 * is respected before freeing memory containing @napi 6780 */ 6781 static void napi_hash_del(struct napi_struct *napi) 6782 { 6783 spin_lock(&napi_hash_lock); 6784 6785 hlist_del_init_rcu(&napi->napi_hash_node); 6786 6787 spin_unlock(&napi_hash_lock); 6788 } 6789 6790 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6791 { 6792 struct napi_struct *napi; 6793 6794 napi = container_of(timer, struct napi_struct, timer); 6795 6796 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6797 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 6798 */ 6799 if (!napi_disable_pending(napi) && 6800 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { 6801 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); 6802 __napi_schedule_irqoff(napi); 6803 } 6804 6805 return HRTIMER_NORESTART; 6806 } 6807 6808 static void init_gro_hash(struct napi_struct *napi) 6809 { 6810 int i; 6811 6812 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6813 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6814 napi->gro_hash[i].count = 0; 6815 } 6816 napi->gro_bitmask = 0; 6817 } 6818 6819 int dev_set_threaded(struct net_device *dev, bool threaded) 6820 { 6821 struct napi_struct *napi; 6822 int err = 0; 6823 6824 if (dev->threaded == threaded) 6825 return 0; 6826 6827 if (threaded) { 6828 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6829 if (!napi->thread) { 6830 err = napi_kthread_create(napi); 6831 if (err) { 6832 threaded = false; 6833 break; 6834 } 6835 } 6836 } 6837 } 6838 6839 dev->threaded = threaded; 6840 6841 /* Make sure kthread is created before THREADED bit 6842 * is set. 6843 */ 6844 smp_mb__before_atomic(); 6845 6846 /* Setting/unsetting threaded mode on a napi might not immediately 6847 * take effect, if the current napi instance is actively being 6848 * polled. In this case, the switch between threaded mode and 6849 * softirq mode will happen in the next round of napi_schedule(). 6850 * This should not cause hiccups/stalls to the live traffic. 6851 */ 6852 list_for_each_entry(napi, &dev->napi_list, dev_list) { 6853 if (threaded) 6854 set_bit(NAPI_STATE_THREADED, &napi->state); 6855 else 6856 clear_bit(NAPI_STATE_THREADED, &napi->state); 6857 } 6858 6859 return err; 6860 } 6861 EXPORT_SYMBOL(dev_set_threaded); 6862 6863 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6864 int (*poll)(struct napi_struct *, int), int weight) 6865 { 6866 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) 6867 return; 6868 6869 INIT_LIST_HEAD(&napi->poll_list); 6870 INIT_HLIST_NODE(&napi->napi_hash_node); 6871 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6872 napi->timer.function = napi_watchdog; 6873 init_gro_hash(napi); 6874 napi->skb = NULL; 6875 INIT_LIST_HEAD(&napi->rx_list); 6876 napi->rx_count = 0; 6877 napi->poll = poll; 6878 if (weight > NAPI_POLL_WEIGHT) 6879 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6880 weight); 6881 napi->weight = weight; 6882 napi->dev = dev; 6883 #ifdef CONFIG_NETPOLL 6884 napi->poll_owner = -1; 6885 #endif 6886 set_bit(NAPI_STATE_SCHED, &napi->state); 6887 set_bit(NAPI_STATE_NPSVC, &napi->state); 6888 list_add_rcu(&napi->dev_list, &dev->napi_list); 6889 napi_hash_add(napi); 6890 /* Create kthread for this napi if dev->threaded is set. 6891 * Clear dev->threaded if kthread creation failed so that 6892 * threaded mode will not be enabled in napi_enable(). 6893 */ 6894 if (dev->threaded && napi_kthread_create(napi)) 6895 dev->threaded = 0; 6896 } 6897 EXPORT_SYMBOL(netif_napi_add); 6898 6899 void napi_disable(struct napi_struct *n) 6900 { 6901 might_sleep(); 6902 set_bit(NAPI_STATE_DISABLE, &n->state); 6903 6904 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 6905 msleep(1); 6906 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) 6907 msleep(1); 6908 6909 hrtimer_cancel(&n->timer); 6910 6911 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); 6912 clear_bit(NAPI_STATE_DISABLE, &n->state); 6913 clear_bit(NAPI_STATE_THREADED, &n->state); 6914 } 6915 EXPORT_SYMBOL(napi_disable); 6916 6917 /** 6918 * napi_enable - enable NAPI scheduling 6919 * @n: NAPI context 6920 * 6921 * Resume NAPI from being scheduled on this context. 6922 * Must be paired with napi_disable. 6923 */ 6924 void napi_enable(struct napi_struct *n) 6925 { 6926 unsigned long val, new; 6927 6928 do { 6929 val = READ_ONCE(n->state); 6930 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6931 6932 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6933 if (n->dev->threaded && n->thread) 6934 new |= NAPIF_STATE_THREADED; 6935 } while (cmpxchg(&n->state, val, new) != val); 6936 } 6937 EXPORT_SYMBOL(napi_enable); 6938 6939 static void flush_gro_hash(struct napi_struct *napi) 6940 { 6941 int i; 6942 6943 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6944 struct sk_buff *skb, *n; 6945 6946 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6947 kfree_skb(skb); 6948 napi->gro_hash[i].count = 0; 6949 } 6950 } 6951 6952 /* Must be called in process context */ 6953 void __netif_napi_del(struct napi_struct *napi) 6954 { 6955 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) 6956 return; 6957 6958 napi_hash_del(napi); 6959 list_del_rcu(&napi->dev_list); 6960 napi_free_frags(napi); 6961 6962 flush_gro_hash(napi); 6963 napi->gro_bitmask = 0; 6964 6965 if (napi->thread) { 6966 kthread_stop(napi->thread); 6967 napi->thread = NULL; 6968 } 6969 } 6970 EXPORT_SYMBOL(__netif_napi_del); 6971 6972 static int __napi_poll(struct napi_struct *n, bool *repoll) 6973 { 6974 int work, weight; 6975 6976 weight = n->weight; 6977 6978 /* This NAPI_STATE_SCHED test is for avoiding a race 6979 * with netpoll's poll_napi(). Only the entity which 6980 * obtains the lock and sees NAPI_STATE_SCHED set will 6981 * actually make the ->poll() call. Therefore we avoid 6982 * accidentally calling ->poll() when NAPI is not scheduled. 6983 */ 6984 work = 0; 6985 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6986 work = n->poll(n, weight); 6987 trace_napi_poll(n, work, weight); 6988 } 6989 6990 if (unlikely(work > weight)) 6991 pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n", 6992 n->poll, work, weight); 6993 6994 if (likely(work < weight)) 6995 return work; 6996 6997 /* Drivers must not modify the NAPI state if they 6998 * consume the entire weight. In such cases this code 6999 * still "owns" the NAPI instance and therefore can 7000 * move the instance around on the list at-will. 7001 */ 7002 if (unlikely(napi_disable_pending(n))) { 7003 napi_complete(n); 7004 return work; 7005 } 7006 7007 /* The NAPI context has more processing work, but busy-polling 7008 * is preferred. Exit early. 7009 */ 7010 if (napi_prefer_busy_poll(n)) { 7011 if (napi_complete_done(n, work)) { 7012 /* If timeout is not set, we need to make sure 7013 * that the NAPI is re-scheduled. 7014 */ 7015 napi_schedule(n); 7016 } 7017 return work; 7018 } 7019 7020 if (n->gro_bitmask) { 7021 /* flush too old packets 7022 * If HZ < 1000, flush all packets. 7023 */ 7024 napi_gro_flush(n, HZ >= 1000); 7025 } 7026 7027 gro_normal_list(n); 7028 7029 /* Some drivers may have called napi_schedule 7030 * prior to exhausting their budget. 7031 */ 7032 if (unlikely(!list_empty(&n->poll_list))) { 7033 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 7034 n->dev ? n->dev->name : "backlog"); 7035 return work; 7036 } 7037 7038 *repoll = true; 7039 7040 return work; 7041 } 7042 7043 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 7044 { 7045 bool do_repoll = false; 7046 void *have; 7047 int work; 7048 7049 list_del_init(&n->poll_list); 7050 7051 have = netpoll_poll_lock(n); 7052 7053 work = __napi_poll(n, &do_repoll); 7054 7055 if (do_repoll) 7056 list_add_tail(&n->poll_list, repoll); 7057 7058 netpoll_poll_unlock(have); 7059 7060 return work; 7061 } 7062 7063 static int napi_thread_wait(struct napi_struct *napi) 7064 { 7065 bool woken = false; 7066 7067 set_current_state(TASK_INTERRUPTIBLE); 7068 7069 while (!kthread_should_stop()) { 7070 /* Testing SCHED_THREADED bit here to make sure the current 7071 * kthread owns this napi and could poll on this napi. 7072 * Testing SCHED bit is not enough because SCHED bit might be 7073 * set by some other busy poll thread or by napi_disable(). 7074 */ 7075 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 7076 WARN_ON(!list_empty(&napi->poll_list)); 7077 __set_current_state(TASK_RUNNING); 7078 return 0; 7079 } 7080 7081 schedule(); 7082 /* woken being true indicates this thread owns this napi. */ 7083 woken = true; 7084 set_current_state(TASK_INTERRUPTIBLE); 7085 } 7086 __set_current_state(TASK_RUNNING); 7087 7088 return -1; 7089 } 7090 7091 static int napi_threaded_poll(void *data) 7092 { 7093 struct napi_struct *napi = data; 7094 void *have; 7095 7096 while (!napi_thread_wait(napi)) { 7097 for (;;) { 7098 bool repoll = false; 7099 7100 local_bh_disable(); 7101 7102 have = netpoll_poll_lock(napi); 7103 __napi_poll(napi, &repoll); 7104 netpoll_poll_unlock(have); 7105 7106 local_bh_enable(); 7107 7108 if (!repoll) 7109 break; 7110 7111 cond_resched(); 7112 } 7113 } 7114 return 0; 7115 } 7116 7117 static __latent_entropy void net_rx_action(struct softirq_action *h) 7118 { 7119 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 7120 unsigned long time_limit = jiffies + 7121 usecs_to_jiffies(netdev_budget_usecs); 7122 int budget = netdev_budget; 7123 LIST_HEAD(list); 7124 LIST_HEAD(repoll); 7125 7126 local_irq_disable(); 7127 list_splice_init(&sd->poll_list, &list); 7128 local_irq_enable(); 7129 7130 for (;;) { 7131 struct napi_struct *n; 7132 7133 if (list_empty(&list)) { 7134 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 7135 return; 7136 break; 7137 } 7138 7139 n = list_first_entry(&list, struct napi_struct, poll_list); 7140 budget -= napi_poll(n, &repoll); 7141 7142 /* If softirq window is exhausted then punt. 7143 * Allow this to run for 2 jiffies since which will allow 7144 * an average latency of 1.5/HZ. 7145 */ 7146 if (unlikely(budget <= 0 || 7147 time_after_eq(jiffies, time_limit))) { 7148 sd->time_squeeze++; 7149 break; 7150 } 7151 } 7152 7153 local_irq_disable(); 7154 7155 list_splice_tail_init(&sd->poll_list, &list); 7156 list_splice_tail(&repoll, &list); 7157 list_splice(&list, &sd->poll_list); 7158 if (!list_empty(&sd->poll_list)) 7159 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 7160 7161 net_rps_action_and_irq_enable(sd); 7162 } 7163 7164 struct netdev_adjacent { 7165 struct net_device *dev; 7166 7167 /* upper master flag, there can only be one master device per list */ 7168 bool master; 7169 7170 /* lookup ignore flag */ 7171 bool ignore; 7172 7173 /* counter for the number of times this device was added to us */ 7174 u16 ref_nr; 7175 7176 /* private field for the users */ 7177 void *private; 7178 7179 struct list_head list; 7180 struct rcu_head rcu; 7181 }; 7182 7183 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 7184 struct list_head *adj_list) 7185 { 7186 struct netdev_adjacent *adj; 7187 7188 list_for_each_entry(adj, adj_list, list) { 7189 if (adj->dev == adj_dev) 7190 return adj; 7191 } 7192 return NULL; 7193 } 7194 7195 static int ____netdev_has_upper_dev(struct net_device *upper_dev, 7196 struct netdev_nested_priv *priv) 7197 { 7198 struct net_device *dev = (struct net_device *)priv->data; 7199 7200 return upper_dev == dev; 7201 } 7202 7203 /** 7204 * netdev_has_upper_dev - Check if device is linked to an upper device 7205 * @dev: device 7206 * @upper_dev: upper device to check 7207 * 7208 * Find out if a device is linked to specified upper device and return true 7209 * in case it is. Note that this checks only immediate upper device, 7210 * not through a complete stack of devices. The caller must hold the RTNL lock. 7211 */ 7212 bool netdev_has_upper_dev(struct net_device *dev, 7213 struct net_device *upper_dev) 7214 { 7215 struct netdev_nested_priv priv = { 7216 .data = (void *)upper_dev, 7217 }; 7218 7219 ASSERT_RTNL(); 7220 7221 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7222 &priv); 7223 } 7224 EXPORT_SYMBOL(netdev_has_upper_dev); 7225 7226 /** 7227 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device 7228 * @dev: device 7229 * @upper_dev: upper device to check 7230 * 7231 * Find out if a device is linked to specified upper device and return true 7232 * in case it is. Note that this checks the entire upper device chain. 7233 * The caller must hold rcu lock. 7234 */ 7235 7236 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 7237 struct net_device *upper_dev) 7238 { 7239 struct netdev_nested_priv priv = { 7240 .data = (void *)upper_dev, 7241 }; 7242 7243 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 7244 &priv); 7245 } 7246 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 7247 7248 /** 7249 * netdev_has_any_upper_dev - Check if device is linked to some device 7250 * @dev: device 7251 * 7252 * Find out if a device is linked to an upper device and return true in case 7253 * it is. The caller must hold the RTNL lock. 7254 */ 7255 bool netdev_has_any_upper_dev(struct net_device *dev) 7256 { 7257 ASSERT_RTNL(); 7258 7259 return !list_empty(&dev->adj_list.upper); 7260 } 7261 EXPORT_SYMBOL(netdev_has_any_upper_dev); 7262 7263 /** 7264 * netdev_master_upper_dev_get - Get master upper device 7265 * @dev: device 7266 * 7267 * Find a master upper device and return pointer to it or NULL in case 7268 * it's not there. The caller must hold the RTNL lock. 7269 */ 7270 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 7271 { 7272 struct netdev_adjacent *upper; 7273 7274 ASSERT_RTNL(); 7275 7276 if (list_empty(&dev->adj_list.upper)) 7277 return NULL; 7278 7279 upper = list_first_entry(&dev->adj_list.upper, 7280 struct netdev_adjacent, list); 7281 if (likely(upper->master)) 7282 return upper->dev; 7283 return NULL; 7284 } 7285 EXPORT_SYMBOL(netdev_master_upper_dev_get); 7286 7287 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 7288 { 7289 struct netdev_adjacent *upper; 7290 7291 ASSERT_RTNL(); 7292 7293 if (list_empty(&dev->adj_list.upper)) 7294 return NULL; 7295 7296 upper = list_first_entry(&dev->adj_list.upper, 7297 struct netdev_adjacent, list); 7298 if (likely(upper->master) && !upper->ignore) 7299 return upper->dev; 7300 return NULL; 7301 } 7302 7303 /** 7304 * netdev_has_any_lower_dev - Check if device is linked to some device 7305 * @dev: device 7306 * 7307 * Find out if a device is linked to a lower device and return true in case 7308 * it is. The caller must hold the RTNL lock. 7309 */ 7310 static bool netdev_has_any_lower_dev(struct net_device *dev) 7311 { 7312 ASSERT_RTNL(); 7313 7314 return !list_empty(&dev->adj_list.lower); 7315 } 7316 7317 void *netdev_adjacent_get_private(struct list_head *adj_list) 7318 { 7319 struct netdev_adjacent *adj; 7320 7321 adj = list_entry(adj_list, struct netdev_adjacent, list); 7322 7323 return adj->private; 7324 } 7325 EXPORT_SYMBOL(netdev_adjacent_get_private); 7326 7327 /** 7328 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 7329 * @dev: device 7330 * @iter: list_head ** of the current position 7331 * 7332 * Gets the next device from the dev's upper list, starting from iter 7333 * position. The caller must hold RCU read lock. 7334 */ 7335 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 7336 struct list_head **iter) 7337 { 7338 struct netdev_adjacent *upper; 7339 7340 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7341 7342 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7343 7344 if (&upper->list == &dev->adj_list.upper) 7345 return NULL; 7346 7347 *iter = &upper->list; 7348 7349 return upper->dev; 7350 } 7351 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 7352 7353 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 7354 struct list_head **iter, 7355 bool *ignore) 7356 { 7357 struct netdev_adjacent *upper; 7358 7359 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 7360 7361 if (&upper->list == &dev->adj_list.upper) 7362 return NULL; 7363 7364 *iter = &upper->list; 7365 *ignore = upper->ignore; 7366 7367 return upper->dev; 7368 } 7369 7370 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 7371 struct list_head **iter) 7372 { 7373 struct netdev_adjacent *upper; 7374 7375 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 7376 7377 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7378 7379 if (&upper->list == &dev->adj_list.upper) 7380 return NULL; 7381 7382 *iter = &upper->list; 7383 7384 return upper->dev; 7385 } 7386 7387 static int __netdev_walk_all_upper_dev(struct net_device *dev, 7388 int (*fn)(struct net_device *dev, 7389 struct netdev_nested_priv *priv), 7390 struct netdev_nested_priv *priv) 7391 { 7392 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7393 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7394 int ret, cur = 0; 7395 bool ignore; 7396 7397 now = dev; 7398 iter = &dev->adj_list.upper; 7399 7400 while (1) { 7401 if (now != dev) { 7402 ret = fn(now, priv); 7403 if (ret) 7404 return ret; 7405 } 7406 7407 next = NULL; 7408 while (1) { 7409 udev = __netdev_next_upper_dev(now, &iter, &ignore); 7410 if (!udev) 7411 break; 7412 if (ignore) 7413 continue; 7414 7415 next = udev; 7416 niter = &udev->adj_list.upper; 7417 dev_stack[cur] = now; 7418 iter_stack[cur++] = iter; 7419 break; 7420 } 7421 7422 if (!next) { 7423 if (!cur) 7424 return 0; 7425 next = dev_stack[--cur]; 7426 niter = iter_stack[cur]; 7427 } 7428 7429 now = next; 7430 iter = niter; 7431 } 7432 7433 return 0; 7434 } 7435 7436 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 7437 int (*fn)(struct net_device *dev, 7438 struct netdev_nested_priv *priv), 7439 struct netdev_nested_priv *priv) 7440 { 7441 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7442 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7443 int ret, cur = 0; 7444 7445 now = dev; 7446 iter = &dev->adj_list.upper; 7447 7448 while (1) { 7449 if (now != dev) { 7450 ret = fn(now, priv); 7451 if (ret) 7452 return ret; 7453 } 7454 7455 next = NULL; 7456 while (1) { 7457 udev = netdev_next_upper_dev_rcu(now, &iter); 7458 if (!udev) 7459 break; 7460 7461 next = udev; 7462 niter = &udev->adj_list.upper; 7463 dev_stack[cur] = now; 7464 iter_stack[cur++] = iter; 7465 break; 7466 } 7467 7468 if (!next) { 7469 if (!cur) 7470 return 0; 7471 next = dev_stack[--cur]; 7472 niter = iter_stack[cur]; 7473 } 7474 7475 now = next; 7476 iter = niter; 7477 } 7478 7479 return 0; 7480 } 7481 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 7482 7483 static bool __netdev_has_upper_dev(struct net_device *dev, 7484 struct net_device *upper_dev) 7485 { 7486 struct netdev_nested_priv priv = { 7487 .flags = 0, 7488 .data = (void *)upper_dev, 7489 }; 7490 7491 ASSERT_RTNL(); 7492 7493 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 7494 &priv); 7495 } 7496 7497 /** 7498 * netdev_lower_get_next_private - Get the next ->private from the 7499 * lower neighbour list 7500 * @dev: device 7501 * @iter: list_head ** of the current position 7502 * 7503 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7504 * list, starting from iter position. The caller must hold either hold the 7505 * RTNL lock or its own locking that guarantees that the neighbour lower 7506 * list will remain unchanged. 7507 */ 7508 void *netdev_lower_get_next_private(struct net_device *dev, 7509 struct list_head **iter) 7510 { 7511 struct netdev_adjacent *lower; 7512 7513 lower = list_entry(*iter, struct netdev_adjacent, list); 7514 7515 if (&lower->list == &dev->adj_list.lower) 7516 return NULL; 7517 7518 *iter = lower->list.next; 7519 7520 return lower->private; 7521 } 7522 EXPORT_SYMBOL(netdev_lower_get_next_private); 7523 7524 /** 7525 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7526 * lower neighbour list, RCU 7527 * variant 7528 * @dev: device 7529 * @iter: list_head ** of the current position 7530 * 7531 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7532 * list, starting from iter position. The caller must hold RCU read lock. 7533 */ 7534 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7535 struct list_head **iter) 7536 { 7537 struct netdev_adjacent *lower; 7538 7539 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 7540 7541 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7542 7543 if (&lower->list == &dev->adj_list.lower) 7544 return NULL; 7545 7546 *iter = &lower->list; 7547 7548 return lower->private; 7549 } 7550 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7551 7552 /** 7553 * netdev_lower_get_next - Get the next device from the lower neighbour 7554 * list 7555 * @dev: device 7556 * @iter: list_head ** of the current position 7557 * 7558 * Gets the next netdev_adjacent from the dev's lower neighbour 7559 * list, starting from iter position. The caller must hold RTNL lock or 7560 * its own locking that guarantees that the neighbour lower 7561 * list will remain unchanged. 7562 */ 7563 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7564 { 7565 struct netdev_adjacent *lower; 7566 7567 lower = list_entry(*iter, struct netdev_adjacent, list); 7568 7569 if (&lower->list == &dev->adj_list.lower) 7570 return NULL; 7571 7572 *iter = lower->list.next; 7573 7574 return lower->dev; 7575 } 7576 EXPORT_SYMBOL(netdev_lower_get_next); 7577 7578 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7579 struct list_head **iter) 7580 { 7581 struct netdev_adjacent *lower; 7582 7583 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7584 7585 if (&lower->list == &dev->adj_list.lower) 7586 return NULL; 7587 7588 *iter = &lower->list; 7589 7590 return lower->dev; 7591 } 7592 7593 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7594 struct list_head **iter, 7595 bool *ignore) 7596 { 7597 struct netdev_adjacent *lower; 7598 7599 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7600 7601 if (&lower->list == &dev->adj_list.lower) 7602 return NULL; 7603 7604 *iter = &lower->list; 7605 *ignore = lower->ignore; 7606 7607 return lower->dev; 7608 } 7609 7610 int netdev_walk_all_lower_dev(struct net_device *dev, 7611 int (*fn)(struct net_device *dev, 7612 struct netdev_nested_priv *priv), 7613 struct netdev_nested_priv *priv) 7614 { 7615 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7616 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7617 int ret, cur = 0; 7618 7619 now = dev; 7620 iter = &dev->adj_list.lower; 7621 7622 while (1) { 7623 if (now != dev) { 7624 ret = fn(now, priv); 7625 if (ret) 7626 return ret; 7627 } 7628 7629 next = NULL; 7630 while (1) { 7631 ldev = netdev_next_lower_dev(now, &iter); 7632 if (!ldev) 7633 break; 7634 7635 next = ldev; 7636 niter = &ldev->adj_list.lower; 7637 dev_stack[cur] = now; 7638 iter_stack[cur++] = iter; 7639 break; 7640 } 7641 7642 if (!next) { 7643 if (!cur) 7644 return 0; 7645 next = dev_stack[--cur]; 7646 niter = iter_stack[cur]; 7647 } 7648 7649 now = next; 7650 iter = niter; 7651 } 7652 7653 return 0; 7654 } 7655 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7656 7657 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7658 int (*fn)(struct net_device *dev, 7659 struct netdev_nested_priv *priv), 7660 struct netdev_nested_priv *priv) 7661 { 7662 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7663 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7664 int ret, cur = 0; 7665 bool ignore; 7666 7667 now = dev; 7668 iter = &dev->adj_list.lower; 7669 7670 while (1) { 7671 if (now != dev) { 7672 ret = fn(now, priv); 7673 if (ret) 7674 return ret; 7675 } 7676 7677 next = NULL; 7678 while (1) { 7679 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7680 if (!ldev) 7681 break; 7682 if (ignore) 7683 continue; 7684 7685 next = ldev; 7686 niter = &ldev->adj_list.lower; 7687 dev_stack[cur] = now; 7688 iter_stack[cur++] = iter; 7689 break; 7690 } 7691 7692 if (!next) { 7693 if (!cur) 7694 return 0; 7695 next = dev_stack[--cur]; 7696 niter = iter_stack[cur]; 7697 } 7698 7699 now = next; 7700 iter = niter; 7701 } 7702 7703 return 0; 7704 } 7705 7706 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7707 struct list_head **iter) 7708 { 7709 struct netdev_adjacent *lower; 7710 7711 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7712 if (&lower->list == &dev->adj_list.lower) 7713 return NULL; 7714 7715 *iter = &lower->list; 7716 7717 return lower->dev; 7718 } 7719 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7720 7721 static u8 __netdev_upper_depth(struct net_device *dev) 7722 { 7723 struct net_device *udev; 7724 struct list_head *iter; 7725 u8 max_depth = 0; 7726 bool ignore; 7727 7728 for (iter = &dev->adj_list.upper, 7729 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7730 udev; 7731 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7732 if (ignore) 7733 continue; 7734 if (max_depth < udev->upper_level) 7735 max_depth = udev->upper_level; 7736 } 7737 7738 return max_depth; 7739 } 7740 7741 static u8 __netdev_lower_depth(struct net_device *dev) 7742 { 7743 struct net_device *ldev; 7744 struct list_head *iter; 7745 u8 max_depth = 0; 7746 bool ignore; 7747 7748 for (iter = &dev->adj_list.lower, 7749 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7750 ldev; 7751 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7752 if (ignore) 7753 continue; 7754 if (max_depth < ldev->lower_level) 7755 max_depth = ldev->lower_level; 7756 } 7757 7758 return max_depth; 7759 } 7760 7761 static int __netdev_update_upper_level(struct net_device *dev, 7762 struct netdev_nested_priv *__unused) 7763 { 7764 dev->upper_level = __netdev_upper_depth(dev) + 1; 7765 return 0; 7766 } 7767 7768 static int __netdev_update_lower_level(struct net_device *dev, 7769 struct netdev_nested_priv *priv) 7770 { 7771 dev->lower_level = __netdev_lower_depth(dev) + 1; 7772 7773 #ifdef CONFIG_LOCKDEP 7774 if (!priv) 7775 return 0; 7776 7777 if (priv->flags & NESTED_SYNC_IMM) 7778 dev->nested_level = dev->lower_level - 1; 7779 if (priv->flags & NESTED_SYNC_TODO) 7780 net_unlink_todo(dev); 7781 #endif 7782 return 0; 7783 } 7784 7785 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7786 int (*fn)(struct net_device *dev, 7787 struct netdev_nested_priv *priv), 7788 struct netdev_nested_priv *priv) 7789 { 7790 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7791 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7792 int ret, cur = 0; 7793 7794 now = dev; 7795 iter = &dev->adj_list.lower; 7796 7797 while (1) { 7798 if (now != dev) { 7799 ret = fn(now, priv); 7800 if (ret) 7801 return ret; 7802 } 7803 7804 next = NULL; 7805 while (1) { 7806 ldev = netdev_next_lower_dev_rcu(now, &iter); 7807 if (!ldev) 7808 break; 7809 7810 next = ldev; 7811 niter = &ldev->adj_list.lower; 7812 dev_stack[cur] = now; 7813 iter_stack[cur++] = iter; 7814 break; 7815 } 7816 7817 if (!next) { 7818 if (!cur) 7819 return 0; 7820 next = dev_stack[--cur]; 7821 niter = iter_stack[cur]; 7822 } 7823 7824 now = next; 7825 iter = niter; 7826 } 7827 7828 return 0; 7829 } 7830 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7831 7832 /** 7833 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7834 * lower neighbour list, RCU 7835 * variant 7836 * @dev: device 7837 * 7838 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7839 * list. The caller must hold RCU read lock. 7840 */ 7841 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7842 { 7843 struct netdev_adjacent *lower; 7844 7845 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7846 struct netdev_adjacent, list); 7847 if (lower) 7848 return lower->private; 7849 return NULL; 7850 } 7851 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7852 7853 /** 7854 * netdev_master_upper_dev_get_rcu - Get master upper device 7855 * @dev: device 7856 * 7857 * Find a master upper device and return pointer to it or NULL in case 7858 * it's not there. The caller must hold the RCU read lock. 7859 */ 7860 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7861 { 7862 struct netdev_adjacent *upper; 7863 7864 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7865 struct netdev_adjacent, list); 7866 if (upper && likely(upper->master)) 7867 return upper->dev; 7868 return NULL; 7869 } 7870 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7871 7872 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7873 struct net_device *adj_dev, 7874 struct list_head *dev_list) 7875 { 7876 char linkname[IFNAMSIZ+7]; 7877 7878 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7879 "upper_%s" : "lower_%s", adj_dev->name); 7880 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7881 linkname); 7882 } 7883 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7884 char *name, 7885 struct list_head *dev_list) 7886 { 7887 char linkname[IFNAMSIZ+7]; 7888 7889 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7890 "upper_%s" : "lower_%s", name); 7891 sysfs_remove_link(&(dev->dev.kobj), linkname); 7892 } 7893 7894 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7895 struct net_device *adj_dev, 7896 struct list_head *dev_list) 7897 { 7898 return (dev_list == &dev->adj_list.upper || 7899 dev_list == &dev->adj_list.lower) && 7900 net_eq(dev_net(dev), dev_net(adj_dev)); 7901 } 7902 7903 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7904 struct net_device *adj_dev, 7905 struct list_head *dev_list, 7906 void *private, bool master) 7907 { 7908 struct netdev_adjacent *adj; 7909 int ret; 7910 7911 adj = __netdev_find_adj(adj_dev, dev_list); 7912 7913 if (adj) { 7914 adj->ref_nr += 1; 7915 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7916 dev->name, adj_dev->name, adj->ref_nr); 7917 7918 return 0; 7919 } 7920 7921 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7922 if (!adj) 7923 return -ENOMEM; 7924 7925 adj->dev = adj_dev; 7926 adj->master = master; 7927 adj->ref_nr = 1; 7928 adj->private = private; 7929 adj->ignore = false; 7930 dev_hold(adj_dev); 7931 7932 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7933 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7934 7935 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7936 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7937 if (ret) 7938 goto free_adj; 7939 } 7940 7941 /* Ensure that master link is always the first item in list. */ 7942 if (master) { 7943 ret = sysfs_create_link(&(dev->dev.kobj), 7944 &(adj_dev->dev.kobj), "master"); 7945 if (ret) 7946 goto remove_symlinks; 7947 7948 list_add_rcu(&adj->list, dev_list); 7949 } else { 7950 list_add_tail_rcu(&adj->list, dev_list); 7951 } 7952 7953 return 0; 7954 7955 remove_symlinks: 7956 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7957 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7958 free_adj: 7959 kfree(adj); 7960 dev_put(adj_dev); 7961 7962 return ret; 7963 } 7964 7965 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7966 struct net_device *adj_dev, 7967 u16 ref_nr, 7968 struct list_head *dev_list) 7969 { 7970 struct netdev_adjacent *adj; 7971 7972 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7973 dev->name, adj_dev->name, ref_nr); 7974 7975 adj = __netdev_find_adj(adj_dev, dev_list); 7976 7977 if (!adj) { 7978 pr_err("Adjacency does not exist for device %s from %s\n", 7979 dev->name, adj_dev->name); 7980 WARN_ON(1); 7981 return; 7982 } 7983 7984 if (adj->ref_nr > ref_nr) { 7985 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7986 dev->name, adj_dev->name, ref_nr, 7987 adj->ref_nr - ref_nr); 7988 adj->ref_nr -= ref_nr; 7989 return; 7990 } 7991 7992 if (adj->master) 7993 sysfs_remove_link(&(dev->dev.kobj), "master"); 7994 7995 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7996 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7997 7998 list_del_rcu(&adj->list); 7999 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 8000 adj_dev->name, dev->name, adj_dev->name); 8001 dev_put(adj_dev); 8002 kfree_rcu(adj, rcu); 8003 } 8004 8005 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 8006 struct net_device *upper_dev, 8007 struct list_head *up_list, 8008 struct list_head *down_list, 8009 void *private, bool master) 8010 { 8011 int ret; 8012 8013 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 8014 private, master); 8015 if (ret) 8016 return ret; 8017 8018 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 8019 private, false); 8020 if (ret) { 8021 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 8022 return ret; 8023 } 8024 8025 return 0; 8026 } 8027 8028 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 8029 struct net_device *upper_dev, 8030 u16 ref_nr, 8031 struct list_head *up_list, 8032 struct list_head *down_list) 8033 { 8034 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 8035 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 8036 } 8037 8038 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 8039 struct net_device *upper_dev, 8040 void *private, bool master) 8041 { 8042 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 8043 &dev->adj_list.upper, 8044 &upper_dev->adj_list.lower, 8045 private, master); 8046 } 8047 8048 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 8049 struct net_device *upper_dev) 8050 { 8051 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 8052 &dev->adj_list.upper, 8053 &upper_dev->adj_list.lower); 8054 } 8055 8056 static int __netdev_upper_dev_link(struct net_device *dev, 8057 struct net_device *upper_dev, bool master, 8058 void *upper_priv, void *upper_info, 8059 struct netdev_nested_priv *priv, 8060 struct netlink_ext_ack *extack) 8061 { 8062 struct netdev_notifier_changeupper_info changeupper_info = { 8063 .info = { 8064 .dev = dev, 8065 .extack = extack, 8066 }, 8067 .upper_dev = upper_dev, 8068 .master = master, 8069 .linking = true, 8070 .upper_info = upper_info, 8071 }; 8072 struct net_device *master_dev; 8073 int ret = 0; 8074 8075 ASSERT_RTNL(); 8076 8077 if (dev == upper_dev) 8078 return -EBUSY; 8079 8080 /* To prevent loops, check if dev is not upper device to upper_dev. */ 8081 if (__netdev_has_upper_dev(upper_dev, dev)) 8082 return -EBUSY; 8083 8084 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 8085 return -EMLINK; 8086 8087 if (!master) { 8088 if (__netdev_has_upper_dev(dev, upper_dev)) 8089 return -EEXIST; 8090 } else { 8091 master_dev = __netdev_master_upper_dev_get(dev); 8092 if (master_dev) 8093 return master_dev == upper_dev ? -EEXIST : -EBUSY; 8094 } 8095 8096 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8097 &changeupper_info.info); 8098 ret = notifier_to_errno(ret); 8099 if (ret) 8100 return ret; 8101 8102 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 8103 master); 8104 if (ret) 8105 return ret; 8106 8107 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8108 &changeupper_info.info); 8109 ret = notifier_to_errno(ret); 8110 if (ret) 8111 goto rollback; 8112 8113 __netdev_update_upper_level(dev, NULL); 8114 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8115 8116 __netdev_update_lower_level(upper_dev, priv); 8117 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8118 priv); 8119 8120 return 0; 8121 8122 rollback: 8123 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8124 8125 return ret; 8126 } 8127 8128 /** 8129 * netdev_upper_dev_link - Add a link to the upper device 8130 * @dev: device 8131 * @upper_dev: new upper device 8132 * @extack: netlink extended ack 8133 * 8134 * Adds a link to device which is upper to this one. The caller must hold 8135 * the RTNL lock. On a failure a negative errno code is returned. 8136 * On success the reference counts are adjusted and the function 8137 * returns zero. 8138 */ 8139 int netdev_upper_dev_link(struct net_device *dev, 8140 struct net_device *upper_dev, 8141 struct netlink_ext_ack *extack) 8142 { 8143 struct netdev_nested_priv priv = { 8144 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8145 .data = NULL, 8146 }; 8147 8148 return __netdev_upper_dev_link(dev, upper_dev, false, 8149 NULL, NULL, &priv, extack); 8150 } 8151 EXPORT_SYMBOL(netdev_upper_dev_link); 8152 8153 /** 8154 * netdev_master_upper_dev_link - Add a master link to the upper device 8155 * @dev: device 8156 * @upper_dev: new upper device 8157 * @upper_priv: upper device private 8158 * @upper_info: upper info to be passed down via notifier 8159 * @extack: netlink extended ack 8160 * 8161 * Adds a link to device which is upper to this one. In this case, only 8162 * one master upper device can be linked, although other non-master devices 8163 * might be linked as well. The caller must hold the RTNL lock. 8164 * On a failure a negative errno code is returned. On success the reference 8165 * counts are adjusted and the function returns zero. 8166 */ 8167 int netdev_master_upper_dev_link(struct net_device *dev, 8168 struct net_device *upper_dev, 8169 void *upper_priv, void *upper_info, 8170 struct netlink_ext_ack *extack) 8171 { 8172 struct netdev_nested_priv priv = { 8173 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8174 .data = NULL, 8175 }; 8176 8177 return __netdev_upper_dev_link(dev, upper_dev, true, 8178 upper_priv, upper_info, &priv, extack); 8179 } 8180 EXPORT_SYMBOL(netdev_master_upper_dev_link); 8181 8182 static void __netdev_upper_dev_unlink(struct net_device *dev, 8183 struct net_device *upper_dev, 8184 struct netdev_nested_priv *priv) 8185 { 8186 struct netdev_notifier_changeupper_info changeupper_info = { 8187 .info = { 8188 .dev = dev, 8189 }, 8190 .upper_dev = upper_dev, 8191 .linking = false, 8192 }; 8193 8194 ASSERT_RTNL(); 8195 8196 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 8197 8198 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 8199 &changeupper_info.info); 8200 8201 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 8202 8203 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 8204 &changeupper_info.info); 8205 8206 __netdev_update_upper_level(dev, NULL); 8207 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 8208 8209 __netdev_update_lower_level(upper_dev, priv); 8210 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 8211 priv); 8212 } 8213 8214 /** 8215 * netdev_upper_dev_unlink - Removes a link to upper device 8216 * @dev: device 8217 * @upper_dev: new upper device 8218 * 8219 * Removes a link to device which is upper to this one. The caller must hold 8220 * the RTNL lock. 8221 */ 8222 void netdev_upper_dev_unlink(struct net_device *dev, 8223 struct net_device *upper_dev) 8224 { 8225 struct netdev_nested_priv priv = { 8226 .flags = NESTED_SYNC_TODO, 8227 .data = NULL, 8228 }; 8229 8230 __netdev_upper_dev_unlink(dev, upper_dev, &priv); 8231 } 8232 EXPORT_SYMBOL(netdev_upper_dev_unlink); 8233 8234 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 8235 struct net_device *lower_dev, 8236 bool val) 8237 { 8238 struct netdev_adjacent *adj; 8239 8240 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 8241 if (adj) 8242 adj->ignore = val; 8243 8244 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 8245 if (adj) 8246 adj->ignore = val; 8247 } 8248 8249 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 8250 struct net_device *lower_dev) 8251 { 8252 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 8253 } 8254 8255 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 8256 struct net_device *lower_dev) 8257 { 8258 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 8259 } 8260 8261 int netdev_adjacent_change_prepare(struct net_device *old_dev, 8262 struct net_device *new_dev, 8263 struct net_device *dev, 8264 struct netlink_ext_ack *extack) 8265 { 8266 struct netdev_nested_priv priv = { 8267 .flags = 0, 8268 .data = NULL, 8269 }; 8270 int err; 8271 8272 if (!new_dev) 8273 return 0; 8274 8275 if (old_dev && new_dev != old_dev) 8276 netdev_adjacent_dev_disable(dev, old_dev); 8277 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv, 8278 extack); 8279 if (err) { 8280 if (old_dev && new_dev != old_dev) 8281 netdev_adjacent_dev_enable(dev, old_dev); 8282 return err; 8283 } 8284 8285 return 0; 8286 } 8287 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 8288 8289 void netdev_adjacent_change_commit(struct net_device *old_dev, 8290 struct net_device *new_dev, 8291 struct net_device *dev) 8292 { 8293 struct netdev_nested_priv priv = { 8294 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO, 8295 .data = NULL, 8296 }; 8297 8298 if (!new_dev || !old_dev) 8299 return; 8300 8301 if (new_dev == old_dev) 8302 return; 8303 8304 netdev_adjacent_dev_enable(dev, old_dev); 8305 __netdev_upper_dev_unlink(old_dev, dev, &priv); 8306 } 8307 EXPORT_SYMBOL(netdev_adjacent_change_commit); 8308 8309 void netdev_adjacent_change_abort(struct net_device *old_dev, 8310 struct net_device *new_dev, 8311 struct net_device *dev) 8312 { 8313 struct netdev_nested_priv priv = { 8314 .flags = 0, 8315 .data = NULL, 8316 }; 8317 8318 if (!new_dev) 8319 return; 8320 8321 if (old_dev && new_dev != old_dev) 8322 netdev_adjacent_dev_enable(dev, old_dev); 8323 8324 __netdev_upper_dev_unlink(new_dev, dev, &priv); 8325 } 8326 EXPORT_SYMBOL(netdev_adjacent_change_abort); 8327 8328 /** 8329 * netdev_bonding_info_change - Dispatch event about slave change 8330 * @dev: device 8331 * @bonding_info: info to dispatch 8332 * 8333 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 8334 * The caller must hold the RTNL lock. 8335 */ 8336 void netdev_bonding_info_change(struct net_device *dev, 8337 struct netdev_bonding_info *bonding_info) 8338 { 8339 struct netdev_notifier_bonding_info info = { 8340 .info.dev = dev, 8341 }; 8342 8343 memcpy(&info.bonding_info, bonding_info, 8344 sizeof(struct netdev_bonding_info)); 8345 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 8346 &info.info); 8347 } 8348 EXPORT_SYMBOL(netdev_bonding_info_change); 8349 8350 /** 8351 * netdev_get_xmit_slave - Get the xmit slave of master device 8352 * @dev: device 8353 * @skb: The packet 8354 * @all_slaves: assume all the slaves are active 8355 * 8356 * The reference counters are not incremented so the caller must be 8357 * careful with locks. The caller must hold RCU lock. 8358 * %NULL is returned if no slave is found. 8359 */ 8360 8361 struct net_device *netdev_get_xmit_slave(struct net_device *dev, 8362 struct sk_buff *skb, 8363 bool all_slaves) 8364 { 8365 const struct net_device_ops *ops = dev->netdev_ops; 8366 8367 if (!ops->ndo_get_xmit_slave) 8368 return NULL; 8369 return ops->ndo_get_xmit_slave(dev, skb, all_slaves); 8370 } 8371 EXPORT_SYMBOL(netdev_get_xmit_slave); 8372 8373 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev, 8374 struct sock *sk) 8375 { 8376 const struct net_device_ops *ops = dev->netdev_ops; 8377 8378 if (!ops->ndo_sk_get_lower_dev) 8379 return NULL; 8380 return ops->ndo_sk_get_lower_dev(dev, sk); 8381 } 8382 8383 /** 8384 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket 8385 * @dev: device 8386 * @sk: the socket 8387 * 8388 * %NULL is returned if no lower device is found. 8389 */ 8390 8391 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, 8392 struct sock *sk) 8393 { 8394 struct net_device *lower; 8395 8396 lower = netdev_sk_get_lower_dev(dev, sk); 8397 while (lower) { 8398 dev = lower; 8399 lower = netdev_sk_get_lower_dev(dev, sk); 8400 } 8401 8402 return dev; 8403 } 8404 EXPORT_SYMBOL(netdev_sk_get_lowest_dev); 8405 8406 static void netdev_adjacent_add_links(struct net_device *dev) 8407 { 8408 struct netdev_adjacent *iter; 8409 8410 struct net *net = dev_net(dev); 8411 8412 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8413 if (!net_eq(net, dev_net(iter->dev))) 8414 continue; 8415 netdev_adjacent_sysfs_add(iter->dev, dev, 8416 &iter->dev->adj_list.lower); 8417 netdev_adjacent_sysfs_add(dev, iter->dev, 8418 &dev->adj_list.upper); 8419 } 8420 8421 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8422 if (!net_eq(net, dev_net(iter->dev))) 8423 continue; 8424 netdev_adjacent_sysfs_add(iter->dev, dev, 8425 &iter->dev->adj_list.upper); 8426 netdev_adjacent_sysfs_add(dev, iter->dev, 8427 &dev->adj_list.lower); 8428 } 8429 } 8430 8431 static void netdev_adjacent_del_links(struct net_device *dev) 8432 { 8433 struct netdev_adjacent *iter; 8434 8435 struct net *net = dev_net(dev); 8436 8437 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8438 if (!net_eq(net, dev_net(iter->dev))) 8439 continue; 8440 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8441 &iter->dev->adj_list.lower); 8442 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8443 &dev->adj_list.upper); 8444 } 8445 8446 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8447 if (!net_eq(net, dev_net(iter->dev))) 8448 continue; 8449 netdev_adjacent_sysfs_del(iter->dev, dev->name, 8450 &iter->dev->adj_list.upper); 8451 netdev_adjacent_sysfs_del(dev, iter->dev->name, 8452 &dev->adj_list.lower); 8453 } 8454 } 8455 8456 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 8457 { 8458 struct netdev_adjacent *iter; 8459 8460 struct net *net = dev_net(dev); 8461 8462 list_for_each_entry(iter, &dev->adj_list.upper, list) { 8463 if (!net_eq(net, dev_net(iter->dev))) 8464 continue; 8465 netdev_adjacent_sysfs_del(iter->dev, oldname, 8466 &iter->dev->adj_list.lower); 8467 netdev_adjacent_sysfs_add(iter->dev, dev, 8468 &iter->dev->adj_list.lower); 8469 } 8470 8471 list_for_each_entry(iter, &dev->adj_list.lower, list) { 8472 if (!net_eq(net, dev_net(iter->dev))) 8473 continue; 8474 netdev_adjacent_sysfs_del(iter->dev, oldname, 8475 &iter->dev->adj_list.upper); 8476 netdev_adjacent_sysfs_add(iter->dev, dev, 8477 &iter->dev->adj_list.upper); 8478 } 8479 } 8480 8481 void *netdev_lower_dev_get_private(struct net_device *dev, 8482 struct net_device *lower_dev) 8483 { 8484 struct netdev_adjacent *lower; 8485 8486 if (!lower_dev) 8487 return NULL; 8488 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 8489 if (!lower) 8490 return NULL; 8491 8492 return lower->private; 8493 } 8494 EXPORT_SYMBOL(netdev_lower_dev_get_private); 8495 8496 8497 /** 8498 * netdev_lower_state_changed - Dispatch event about lower device state change 8499 * @lower_dev: device 8500 * @lower_state_info: state to dispatch 8501 * 8502 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 8503 * The caller must hold the RTNL lock. 8504 */ 8505 void netdev_lower_state_changed(struct net_device *lower_dev, 8506 void *lower_state_info) 8507 { 8508 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 8509 .info.dev = lower_dev, 8510 }; 8511 8512 ASSERT_RTNL(); 8513 changelowerstate_info.lower_state_info = lower_state_info; 8514 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 8515 &changelowerstate_info.info); 8516 } 8517 EXPORT_SYMBOL(netdev_lower_state_changed); 8518 8519 static void dev_change_rx_flags(struct net_device *dev, int flags) 8520 { 8521 const struct net_device_ops *ops = dev->netdev_ops; 8522 8523 if (ops->ndo_change_rx_flags) 8524 ops->ndo_change_rx_flags(dev, flags); 8525 } 8526 8527 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 8528 { 8529 unsigned int old_flags = dev->flags; 8530 kuid_t uid; 8531 kgid_t gid; 8532 8533 ASSERT_RTNL(); 8534 8535 dev->flags |= IFF_PROMISC; 8536 dev->promiscuity += inc; 8537 if (dev->promiscuity == 0) { 8538 /* 8539 * Avoid overflow. 8540 * If inc causes overflow, untouch promisc and return error. 8541 */ 8542 if (inc < 0) 8543 dev->flags &= ~IFF_PROMISC; 8544 else { 8545 dev->promiscuity -= inc; 8546 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 8547 dev->name); 8548 return -EOVERFLOW; 8549 } 8550 } 8551 if (dev->flags != old_flags) { 8552 pr_info("device %s %s promiscuous mode\n", 8553 dev->name, 8554 dev->flags & IFF_PROMISC ? "entered" : "left"); 8555 if (audit_enabled) { 8556 current_uid_gid(&uid, &gid); 8557 audit_log(audit_context(), GFP_ATOMIC, 8558 AUDIT_ANOM_PROMISCUOUS, 8559 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 8560 dev->name, (dev->flags & IFF_PROMISC), 8561 (old_flags & IFF_PROMISC), 8562 from_kuid(&init_user_ns, audit_get_loginuid(current)), 8563 from_kuid(&init_user_ns, uid), 8564 from_kgid(&init_user_ns, gid), 8565 audit_get_sessionid(current)); 8566 } 8567 8568 dev_change_rx_flags(dev, IFF_PROMISC); 8569 } 8570 if (notify) 8571 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 8572 return 0; 8573 } 8574 8575 /** 8576 * dev_set_promiscuity - update promiscuity count on a device 8577 * @dev: device 8578 * @inc: modifier 8579 * 8580 * Add or remove promiscuity from a device. While the count in the device 8581 * remains above zero the interface remains promiscuous. Once it hits zero 8582 * the device reverts back to normal filtering operation. A negative inc 8583 * value is used to drop promiscuity on the device. 8584 * Return 0 if successful or a negative errno code on error. 8585 */ 8586 int dev_set_promiscuity(struct net_device *dev, int inc) 8587 { 8588 unsigned int old_flags = dev->flags; 8589 int err; 8590 8591 err = __dev_set_promiscuity(dev, inc, true); 8592 if (err < 0) 8593 return err; 8594 if (dev->flags != old_flags) 8595 dev_set_rx_mode(dev); 8596 return err; 8597 } 8598 EXPORT_SYMBOL(dev_set_promiscuity); 8599 8600 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 8601 { 8602 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 8603 8604 ASSERT_RTNL(); 8605 8606 dev->flags |= IFF_ALLMULTI; 8607 dev->allmulti += inc; 8608 if (dev->allmulti == 0) { 8609 /* 8610 * Avoid overflow. 8611 * If inc causes overflow, untouch allmulti and return error. 8612 */ 8613 if (inc < 0) 8614 dev->flags &= ~IFF_ALLMULTI; 8615 else { 8616 dev->allmulti -= inc; 8617 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 8618 dev->name); 8619 return -EOVERFLOW; 8620 } 8621 } 8622 if (dev->flags ^ old_flags) { 8623 dev_change_rx_flags(dev, IFF_ALLMULTI); 8624 dev_set_rx_mode(dev); 8625 if (notify) 8626 __dev_notify_flags(dev, old_flags, 8627 dev->gflags ^ old_gflags); 8628 } 8629 return 0; 8630 } 8631 8632 /** 8633 * dev_set_allmulti - update allmulti count on a device 8634 * @dev: device 8635 * @inc: modifier 8636 * 8637 * Add or remove reception of all multicast frames to a device. While the 8638 * count in the device remains above zero the interface remains listening 8639 * to all interfaces. Once it hits zero the device reverts back to normal 8640 * filtering operation. A negative @inc value is used to drop the counter 8641 * when releasing a resource needing all multicasts. 8642 * Return 0 if successful or a negative errno code on error. 8643 */ 8644 8645 int dev_set_allmulti(struct net_device *dev, int inc) 8646 { 8647 return __dev_set_allmulti(dev, inc, true); 8648 } 8649 EXPORT_SYMBOL(dev_set_allmulti); 8650 8651 /* 8652 * Upload unicast and multicast address lists to device and 8653 * configure RX filtering. When the device doesn't support unicast 8654 * filtering it is put in promiscuous mode while unicast addresses 8655 * are present. 8656 */ 8657 void __dev_set_rx_mode(struct net_device *dev) 8658 { 8659 const struct net_device_ops *ops = dev->netdev_ops; 8660 8661 /* dev_open will call this function so the list will stay sane. */ 8662 if (!(dev->flags&IFF_UP)) 8663 return; 8664 8665 if (!netif_device_present(dev)) 8666 return; 8667 8668 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8669 /* Unicast addresses changes may only happen under the rtnl, 8670 * therefore calling __dev_set_promiscuity here is safe. 8671 */ 8672 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8673 __dev_set_promiscuity(dev, 1, false); 8674 dev->uc_promisc = true; 8675 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8676 __dev_set_promiscuity(dev, -1, false); 8677 dev->uc_promisc = false; 8678 } 8679 } 8680 8681 if (ops->ndo_set_rx_mode) 8682 ops->ndo_set_rx_mode(dev); 8683 } 8684 8685 void dev_set_rx_mode(struct net_device *dev) 8686 { 8687 netif_addr_lock_bh(dev); 8688 __dev_set_rx_mode(dev); 8689 netif_addr_unlock_bh(dev); 8690 } 8691 8692 /** 8693 * dev_get_flags - get flags reported to userspace 8694 * @dev: device 8695 * 8696 * Get the combination of flag bits exported through APIs to userspace. 8697 */ 8698 unsigned int dev_get_flags(const struct net_device *dev) 8699 { 8700 unsigned int flags; 8701 8702 flags = (dev->flags & ~(IFF_PROMISC | 8703 IFF_ALLMULTI | 8704 IFF_RUNNING | 8705 IFF_LOWER_UP | 8706 IFF_DORMANT)) | 8707 (dev->gflags & (IFF_PROMISC | 8708 IFF_ALLMULTI)); 8709 8710 if (netif_running(dev)) { 8711 if (netif_oper_up(dev)) 8712 flags |= IFF_RUNNING; 8713 if (netif_carrier_ok(dev)) 8714 flags |= IFF_LOWER_UP; 8715 if (netif_dormant(dev)) 8716 flags |= IFF_DORMANT; 8717 } 8718 8719 return flags; 8720 } 8721 EXPORT_SYMBOL(dev_get_flags); 8722 8723 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8724 struct netlink_ext_ack *extack) 8725 { 8726 unsigned int old_flags = dev->flags; 8727 int ret; 8728 8729 ASSERT_RTNL(); 8730 8731 /* 8732 * Set the flags on our device. 8733 */ 8734 8735 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8736 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8737 IFF_AUTOMEDIA)) | 8738 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8739 IFF_ALLMULTI)); 8740 8741 /* 8742 * Load in the correct multicast list now the flags have changed. 8743 */ 8744 8745 if ((old_flags ^ flags) & IFF_MULTICAST) 8746 dev_change_rx_flags(dev, IFF_MULTICAST); 8747 8748 dev_set_rx_mode(dev); 8749 8750 /* 8751 * Have we downed the interface. We handle IFF_UP ourselves 8752 * according to user attempts to set it, rather than blindly 8753 * setting it. 8754 */ 8755 8756 ret = 0; 8757 if ((old_flags ^ flags) & IFF_UP) { 8758 if (old_flags & IFF_UP) 8759 __dev_close(dev); 8760 else 8761 ret = __dev_open(dev, extack); 8762 } 8763 8764 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8765 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8766 unsigned int old_flags = dev->flags; 8767 8768 dev->gflags ^= IFF_PROMISC; 8769 8770 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8771 if (dev->flags != old_flags) 8772 dev_set_rx_mode(dev); 8773 } 8774 8775 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8776 * is important. Some (broken) drivers set IFF_PROMISC, when 8777 * IFF_ALLMULTI is requested not asking us and not reporting. 8778 */ 8779 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8780 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8781 8782 dev->gflags ^= IFF_ALLMULTI; 8783 __dev_set_allmulti(dev, inc, false); 8784 } 8785 8786 return ret; 8787 } 8788 8789 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8790 unsigned int gchanges) 8791 { 8792 unsigned int changes = dev->flags ^ old_flags; 8793 8794 if (gchanges) 8795 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 8796 8797 if (changes & IFF_UP) { 8798 if (dev->flags & IFF_UP) 8799 call_netdevice_notifiers(NETDEV_UP, dev); 8800 else 8801 call_netdevice_notifiers(NETDEV_DOWN, dev); 8802 } 8803 8804 if (dev->flags & IFF_UP && 8805 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8806 struct netdev_notifier_change_info change_info = { 8807 .info = { 8808 .dev = dev, 8809 }, 8810 .flags_changed = changes, 8811 }; 8812 8813 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8814 } 8815 } 8816 8817 /** 8818 * dev_change_flags - change device settings 8819 * @dev: device 8820 * @flags: device state flags 8821 * @extack: netlink extended ack 8822 * 8823 * Change settings on device based state flags. The flags are 8824 * in the userspace exported format. 8825 */ 8826 int dev_change_flags(struct net_device *dev, unsigned int flags, 8827 struct netlink_ext_ack *extack) 8828 { 8829 int ret; 8830 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8831 8832 ret = __dev_change_flags(dev, flags, extack); 8833 if (ret < 0) 8834 return ret; 8835 8836 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8837 __dev_notify_flags(dev, old_flags, changes); 8838 return ret; 8839 } 8840 EXPORT_SYMBOL(dev_change_flags); 8841 8842 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8843 { 8844 const struct net_device_ops *ops = dev->netdev_ops; 8845 8846 if (ops->ndo_change_mtu) 8847 return ops->ndo_change_mtu(dev, new_mtu); 8848 8849 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8850 WRITE_ONCE(dev->mtu, new_mtu); 8851 return 0; 8852 } 8853 EXPORT_SYMBOL(__dev_set_mtu); 8854 8855 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8856 struct netlink_ext_ack *extack) 8857 { 8858 /* MTU must be positive, and in range */ 8859 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8860 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8861 return -EINVAL; 8862 } 8863 8864 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8865 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8866 return -EINVAL; 8867 } 8868 return 0; 8869 } 8870 8871 /** 8872 * dev_set_mtu_ext - Change maximum transfer unit 8873 * @dev: device 8874 * @new_mtu: new transfer unit 8875 * @extack: netlink extended ack 8876 * 8877 * Change the maximum transfer size of the network device. 8878 */ 8879 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8880 struct netlink_ext_ack *extack) 8881 { 8882 int err, orig_mtu; 8883 8884 if (new_mtu == dev->mtu) 8885 return 0; 8886 8887 err = dev_validate_mtu(dev, new_mtu, extack); 8888 if (err) 8889 return err; 8890 8891 if (!netif_device_present(dev)) 8892 return -ENODEV; 8893 8894 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8895 err = notifier_to_errno(err); 8896 if (err) 8897 return err; 8898 8899 orig_mtu = dev->mtu; 8900 err = __dev_set_mtu(dev, new_mtu); 8901 8902 if (!err) { 8903 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8904 orig_mtu); 8905 err = notifier_to_errno(err); 8906 if (err) { 8907 /* setting mtu back and notifying everyone again, 8908 * so that they have a chance to revert changes. 8909 */ 8910 __dev_set_mtu(dev, orig_mtu); 8911 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8912 new_mtu); 8913 } 8914 } 8915 return err; 8916 } 8917 8918 int dev_set_mtu(struct net_device *dev, int new_mtu) 8919 { 8920 struct netlink_ext_ack extack; 8921 int err; 8922 8923 memset(&extack, 0, sizeof(extack)); 8924 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8925 if (err && extack._msg) 8926 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8927 return err; 8928 } 8929 EXPORT_SYMBOL(dev_set_mtu); 8930 8931 /** 8932 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8933 * @dev: device 8934 * @new_len: new tx queue length 8935 */ 8936 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8937 { 8938 unsigned int orig_len = dev->tx_queue_len; 8939 int res; 8940 8941 if (new_len != (unsigned int)new_len) 8942 return -ERANGE; 8943 8944 if (new_len != orig_len) { 8945 dev->tx_queue_len = new_len; 8946 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8947 res = notifier_to_errno(res); 8948 if (res) 8949 goto err_rollback; 8950 res = dev_qdisc_change_tx_queue_len(dev); 8951 if (res) 8952 goto err_rollback; 8953 } 8954 8955 return 0; 8956 8957 err_rollback: 8958 netdev_err(dev, "refused to change device tx_queue_len\n"); 8959 dev->tx_queue_len = orig_len; 8960 return res; 8961 } 8962 8963 /** 8964 * dev_set_group - Change group this device belongs to 8965 * @dev: device 8966 * @new_group: group this device should belong to 8967 */ 8968 void dev_set_group(struct net_device *dev, int new_group) 8969 { 8970 dev->group = new_group; 8971 } 8972 EXPORT_SYMBOL(dev_set_group); 8973 8974 /** 8975 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8976 * @dev: device 8977 * @addr: new address 8978 * @extack: netlink extended ack 8979 */ 8980 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8981 struct netlink_ext_ack *extack) 8982 { 8983 struct netdev_notifier_pre_changeaddr_info info = { 8984 .info.dev = dev, 8985 .info.extack = extack, 8986 .dev_addr = addr, 8987 }; 8988 int rc; 8989 8990 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8991 return notifier_to_errno(rc); 8992 } 8993 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8994 8995 /** 8996 * dev_set_mac_address - Change Media Access Control Address 8997 * @dev: device 8998 * @sa: new address 8999 * @extack: netlink extended ack 9000 * 9001 * Change the hardware (MAC) address of the device 9002 */ 9003 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 9004 struct netlink_ext_ack *extack) 9005 { 9006 const struct net_device_ops *ops = dev->netdev_ops; 9007 int err; 9008 9009 if (!ops->ndo_set_mac_address) 9010 return -EOPNOTSUPP; 9011 if (sa->sa_family != dev->type) 9012 return -EINVAL; 9013 if (!netif_device_present(dev)) 9014 return -ENODEV; 9015 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 9016 if (err) 9017 return err; 9018 err = ops->ndo_set_mac_address(dev, sa); 9019 if (err) 9020 return err; 9021 dev->addr_assign_type = NET_ADDR_SET; 9022 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 9023 add_device_randomness(dev->dev_addr, dev->addr_len); 9024 return 0; 9025 } 9026 EXPORT_SYMBOL(dev_set_mac_address); 9027 9028 static DECLARE_RWSEM(dev_addr_sem); 9029 9030 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, 9031 struct netlink_ext_ack *extack) 9032 { 9033 int ret; 9034 9035 down_write(&dev_addr_sem); 9036 ret = dev_set_mac_address(dev, sa, extack); 9037 up_write(&dev_addr_sem); 9038 return ret; 9039 } 9040 EXPORT_SYMBOL(dev_set_mac_address_user); 9041 9042 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) 9043 { 9044 size_t size = sizeof(sa->sa_data); 9045 struct net_device *dev; 9046 int ret = 0; 9047 9048 down_read(&dev_addr_sem); 9049 rcu_read_lock(); 9050 9051 dev = dev_get_by_name_rcu(net, dev_name); 9052 if (!dev) { 9053 ret = -ENODEV; 9054 goto unlock; 9055 } 9056 if (!dev->addr_len) 9057 memset(sa->sa_data, 0, size); 9058 else 9059 memcpy(sa->sa_data, dev->dev_addr, 9060 min_t(size_t, size, dev->addr_len)); 9061 sa->sa_family = dev->type; 9062 9063 unlock: 9064 rcu_read_unlock(); 9065 up_read(&dev_addr_sem); 9066 return ret; 9067 } 9068 EXPORT_SYMBOL(dev_get_mac_address); 9069 9070 /** 9071 * dev_change_carrier - Change device carrier 9072 * @dev: device 9073 * @new_carrier: new value 9074 * 9075 * Change device carrier 9076 */ 9077 int dev_change_carrier(struct net_device *dev, bool new_carrier) 9078 { 9079 const struct net_device_ops *ops = dev->netdev_ops; 9080 9081 if (!ops->ndo_change_carrier) 9082 return -EOPNOTSUPP; 9083 if (!netif_device_present(dev)) 9084 return -ENODEV; 9085 return ops->ndo_change_carrier(dev, new_carrier); 9086 } 9087 EXPORT_SYMBOL(dev_change_carrier); 9088 9089 /** 9090 * dev_get_phys_port_id - Get device physical port ID 9091 * @dev: device 9092 * @ppid: port ID 9093 * 9094 * Get device physical port ID 9095 */ 9096 int dev_get_phys_port_id(struct net_device *dev, 9097 struct netdev_phys_item_id *ppid) 9098 { 9099 const struct net_device_ops *ops = dev->netdev_ops; 9100 9101 if (!ops->ndo_get_phys_port_id) 9102 return -EOPNOTSUPP; 9103 return ops->ndo_get_phys_port_id(dev, ppid); 9104 } 9105 EXPORT_SYMBOL(dev_get_phys_port_id); 9106 9107 /** 9108 * dev_get_phys_port_name - Get device physical port name 9109 * @dev: device 9110 * @name: port name 9111 * @len: limit of bytes to copy to name 9112 * 9113 * Get device physical port name 9114 */ 9115 int dev_get_phys_port_name(struct net_device *dev, 9116 char *name, size_t len) 9117 { 9118 const struct net_device_ops *ops = dev->netdev_ops; 9119 int err; 9120 9121 if (ops->ndo_get_phys_port_name) { 9122 err = ops->ndo_get_phys_port_name(dev, name, len); 9123 if (err != -EOPNOTSUPP) 9124 return err; 9125 } 9126 return devlink_compat_phys_port_name_get(dev, name, len); 9127 } 9128 EXPORT_SYMBOL(dev_get_phys_port_name); 9129 9130 /** 9131 * dev_get_port_parent_id - Get the device's port parent identifier 9132 * @dev: network device 9133 * @ppid: pointer to a storage for the port's parent identifier 9134 * @recurse: allow/disallow recursion to lower devices 9135 * 9136 * Get the devices's port parent identifier 9137 */ 9138 int dev_get_port_parent_id(struct net_device *dev, 9139 struct netdev_phys_item_id *ppid, 9140 bool recurse) 9141 { 9142 const struct net_device_ops *ops = dev->netdev_ops; 9143 struct netdev_phys_item_id first = { }; 9144 struct net_device *lower_dev; 9145 struct list_head *iter; 9146 int err; 9147 9148 if (ops->ndo_get_port_parent_id) { 9149 err = ops->ndo_get_port_parent_id(dev, ppid); 9150 if (err != -EOPNOTSUPP) 9151 return err; 9152 } 9153 9154 err = devlink_compat_switch_id_get(dev, ppid); 9155 if (!err || err != -EOPNOTSUPP) 9156 return err; 9157 9158 if (!recurse) 9159 return -EOPNOTSUPP; 9160 9161 netdev_for_each_lower_dev(dev, lower_dev, iter) { 9162 err = dev_get_port_parent_id(lower_dev, ppid, recurse); 9163 if (err) 9164 break; 9165 if (!first.id_len) 9166 first = *ppid; 9167 else if (memcmp(&first, ppid, sizeof(*ppid))) 9168 return -EOPNOTSUPP; 9169 } 9170 9171 return err; 9172 } 9173 EXPORT_SYMBOL(dev_get_port_parent_id); 9174 9175 /** 9176 * netdev_port_same_parent_id - Indicate if two network devices have 9177 * the same port parent identifier 9178 * @a: first network device 9179 * @b: second network device 9180 */ 9181 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 9182 { 9183 struct netdev_phys_item_id a_id = { }; 9184 struct netdev_phys_item_id b_id = { }; 9185 9186 if (dev_get_port_parent_id(a, &a_id, true) || 9187 dev_get_port_parent_id(b, &b_id, true)) 9188 return false; 9189 9190 return netdev_phys_item_id_same(&a_id, &b_id); 9191 } 9192 EXPORT_SYMBOL(netdev_port_same_parent_id); 9193 9194 /** 9195 * dev_change_proto_down - update protocol port state information 9196 * @dev: device 9197 * @proto_down: new value 9198 * 9199 * This info can be used by switch drivers to set the phys state of the 9200 * port. 9201 */ 9202 int dev_change_proto_down(struct net_device *dev, bool proto_down) 9203 { 9204 const struct net_device_ops *ops = dev->netdev_ops; 9205 9206 if (!ops->ndo_change_proto_down) 9207 return -EOPNOTSUPP; 9208 if (!netif_device_present(dev)) 9209 return -ENODEV; 9210 return ops->ndo_change_proto_down(dev, proto_down); 9211 } 9212 EXPORT_SYMBOL(dev_change_proto_down); 9213 9214 /** 9215 * dev_change_proto_down_generic - generic implementation for 9216 * ndo_change_proto_down that sets carrier according to 9217 * proto_down. 9218 * 9219 * @dev: device 9220 * @proto_down: new value 9221 */ 9222 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) 9223 { 9224 if (proto_down) 9225 netif_carrier_off(dev); 9226 else 9227 netif_carrier_on(dev); 9228 dev->proto_down = proto_down; 9229 return 0; 9230 } 9231 EXPORT_SYMBOL(dev_change_proto_down_generic); 9232 9233 /** 9234 * dev_change_proto_down_reason - proto down reason 9235 * 9236 * @dev: device 9237 * @mask: proto down mask 9238 * @value: proto down value 9239 */ 9240 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, 9241 u32 value) 9242 { 9243 int b; 9244 9245 if (!mask) { 9246 dev->proto_down_reason = value; 9247 } else { 9248 for_each_set_bit(b, &mask, 32) { 9249 if (value & (1 << b)) 9250 dev->proto_down_reason |= BIT(b); 9251 else 9252 dev->proto_down_reason &= ~BIT(b); 9253 } 9254 } 9255 } 9256 EXPORT_SYMBOL(dev_change_proto_down_reason); 9257 9258 struct bpf_xdp_link { 9259 struct bpf_link link; 9260 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ 9261 int flags; 9262 }; 9263 9264 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) 9265 { 9266 if (flags & XDP_FLAGS_HW_MODE) 9267 return XDP_MODE_HW; 9268 if (flags & XDP_FLAGS_DRV_MODE) 9269 return XDP_MODE_DRV; 9270 if (flags & XDP_FLAGS_SKB_MODE) 9271 return XDP_MODE_SKB; 9272 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; 9273 } 9274 9275 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) 9276 { 9277 switch (mode) { 9278 case XDP_MODE_SKB: 9279 return generic_xdp_install; 9280 case XDP_MODE_DRV: 9281 case XDP_MODE_HW: 9282 return dev->netdev_ops->ndo_bpf; 9283 default: 9284 return NULL; 9285 } 9286 } 9287 9288 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, 9289 enum bpf_xdp_mode mode) 9290 { 9291 return dev->xdp_state[mode].link; 9292 } 9293 9294 static struct bpf_prog *dev_xdp_prog(struct net_device *dev, 9295 enum bpf_xdp_mode mode) 9296 { 9297 struct bpf_xdp_link *link = dev_xdp_link(dev, mode); 9298 9299 if (link) 9300 return link->link.prog; 9301 return dev->xdp_state[mode].prog; 9302 } 9303 9304 u8 dev_xdp_prog_count(struct net_device *dev) 9305 { 9306 u8 count = 0; 9307 int i; 9308 9309 for (i = 0; i < __MAX_XDP_MODE; i++) 9310 if (dev->xdp_state[i].prog || dev->xdp_state[i].link) 9311 count++; 9312 return count; 9313 } 9314 EXPORT_SYMBOL_GPL(dev_xdp_prog_count); 9315 9316 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) 9317 { 9318 struct bpf_prog *prog = dev_xdp_prog(dev, mode); 9319 9320 return prog ? prog->aux->id : 0; 9321 } 9322 9323 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, 9324 struct bpf_xdp_link *link) 9325 { 9326 dev->xdp_state[mode].link = link; 9327 dev->xdp_state[mode].prog = NULL; 9328 } 9329 9330 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, 9331 struct bpf_prog *prog) 9332 { 9333 dev->xdp_state[mode].link = NULL; 9334 dev->xdp_state[mode].prog = prog; 9335 } 9336 9337 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, 9338 bpf_op_t bpf_op, struct netlink_ext_ack *extack, 9339 u32 flags, struct bpf_prog *prog) 9340 { 9341 struct netdev_bpf xdp; 9342 int err; 9343 9344 memset(&xdp, 0, sizeof(xdp)); 9345 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; 9346 xdp.extack = extack; 9347 xdp.flags = flags; 9348 xdp.prog = prog; 9349 9350 /* Drivers assume refcnt is already incremented (i.e, prog pointer is 9351 * "moved" into driver), so they don't increment it on their own, but 9352 * they do decrement refcnt when program is detached or replaced. 9353 * Given net_device also owns link/prog, we need to bump refcnt here 9354 * to prevent drivers from underflowing it. 9355 */ 9356 if (prog) 9357 bpf_prog_inc(prog); 9358 err = bpf_op(dev, &xdp); 9359 if (err) { 9360 if (prog) 9361 bpf_prog_put(prog); 9362 return err; 9363 } 9364 9365 if (mode != XDP_MODE_HW) 9366 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); 9367 9368 return 0; 9369 } 9370 9371 static void dev_xdp_uninstall(struct net_device *dev) 9372 { 9373 struct bpf_xdp_link *link; 9374 struct bpf_prog *prog; 9375 enum bpf_xdp_mode mode; 9376 bpf_op_t bpf_op; 9377 9378 ASSERT_RTNL(); 9379 9380 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { 9381 prog = dev_xdp_prog(dev, mode); 9382 if (!prog) 9383 continue; 9384 9385 bpf_op = dev_xdp_bpf_op(dev, mode); 9386 if (!bpf_op) 9387 continue; 9388 9389 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9390 9391 /* auto-detach link from net device */ 9392 link = dev_xdp_link(dev, mode); 9393 if (link) 9394 link->dev = NULL; 9395 else 9396 bpf_prog_put(prog); 9397 9398 dev_xdp_set_link(dev, mode, NULL); 9399 } 9400 } 9401 9402 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, 9403 struct bpf_xdp_link *link, struct bpf_prog *new_prog, 9404 struct bpf_prog *old_prog, u32 flags) 9405 { 9406 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); 9407 struct bpf_prog *cur_prog; 9408 struct net_device *upper; 9409 struct list_head *iter; 9410 enum bpf_xdp_mode mode; 9411 bpf_op_t bpf_op; 9412 int err; 9413 9414 ASSERT_RTNL(); 9415 9416 /* either link or prog attachment, never both */ 9417 if (link && (new_prog || old_prog)) 9418 return -EINVAL; 9419 /* link supports only XDP mode flags */ 9420 if (link && (flags & ~XDP_FLAGS_MODES)) { 9421 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); 9422 return -EINVAL; 9423 } 9424 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ 9425 if (num_modes > 1) { 9426 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); 9427 return -EINVAL; 9428 } 9429 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ 9430 if (!num_modes && dev_xdp_prog_count(dev) > 1) { 9431 NL_SET_ERR_MSG(extack, 9432 "More than one program loaded, unset mode is ambiguous"); 9433 return -EINVAL; 9434 } 9435 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ 9436 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { 9437 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); 9438 return -EINVAL; 9439 } 9440 9441 mode = dev_xdp_mode(dev, flags); 9442 /* can't replace attached link */ 9443 if (dev_xdp_link(dev, mode)) { 9444 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); 9445 return -EBUSY; 9446 } 9447 9448 /* don't allow if an upper device already has a program */ 9449 netdev_for_each_upper_dev_rcu(dev, upper, iter) { 9450 if (dev_xdp_prog_count(upper) > 0) { 9451 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program"); 9452 return -EEXIST; 9453 } 9454 } 9455 9456 cur_prog = dev_xdp_prog(dev, mode); 9457 /* can't replace attached prog with link */ 9458 if (link && cur_prog) { 9459 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); 9460 return -EBUSY; 9461 } 9462 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { 9463 NL_SET_ERR_MSG(extack, "Active program does not match expected"); 9464 return -EEXIST; 9465 } 9466 9467 /* put effective new program into new_prog */ 9468 if (link) 9469 new_prog = link->link.prog; 9470 9471 if (new_prog) { 9472 bool offload = mode == XDP_MODE_HW; 9473 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB 9474 ? XDP_MODE_DRV : XDP_MODE_SKB; 9475 9476 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { 9477 NL_SET_ERR_MSG(extack, "XDP program already attached"); 9478 return -EBUSY; 9479 } 9480 if (!offload && dev_xdp_prog(dev, other_mode)) { 9481 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); 9482 return -EEXIST; 9483 } 9484 if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) { 9485 NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); 9486 return -EINVAL; 9487 } 9488 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { 9489 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); 9490 return -EINVAL; 9491 } 9492 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { 9493 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); 9494 return -EINVAL; 9495 } 9496 } 9497 9498 /* don't call drivers if the effective program didn't change */ 9499 if (new_prog != cur_prog) { 9500 bpf_op = dev_xdp_bpf_op(dev, mode); 9501 if (!bpf_op) { 9502 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); 9503 return -EOPNOTSUPP; 9504 } 9505 9506 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); 9507 if (err) 9508 return err; 9509 } 9510 9511 if (link) 9512 dev_xdp_set_link(dev, mode, link); 9513 else 9514 dev_xdp_set_prog(dev, mode, new_prog); 9515 if (cur_prog) 9516 bpf_prog_put(cur_prog); 9517 9518 return 0; 9519 } 9520 9521 static int dev_xdp_attach_link(struct net_device *dev, 9522 struct netlink_ext_ack *extack, 9523 struct bpf_xdp_link *link) 9524 { 9525 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); 9526 } 9527 9528 static int dev_xdp_detach_link(struct net_device *dev, 9529 struct netlink_ext_ack *extack, 9530 struct bpf_xdp_link *link) 9531 { 9532 enum bpf_xdp_mode mode; 9533 bpf_op_t bpf_op; 9534 9535 ASSERT_RTNL(); 9536 9537 mode = dev_xdp_mode(dev, link->flags); 9538 if (dev_xdp_link(dev, mode) != link) 9539 return -EINVAL; 9540 9541 bpf_op = dev_xdp_bpf_op(dev, mode); 9542 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); 9543 dev_xdp_set_link(dev, mode, NULL); 9544 return 0; 9545 } 9546 9547 static void bpf_xdp_link_release(struct bpf_link *link) 9548 { 9549 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9550 9551 rtnl_lock(); 9552 9553 /* if racing with net_device's tear down, xdp_link->dev might be 9554 * already NULL, in which case link was already auto-detached 9555 */ 9556 if (xdp_link->dev) { 9557 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); 9558 xdp_link->dev = NULL; 9559 } 9560 9561 rtnl_unlock(); 9562 } 9563 9564 static int bpf_xdp_link_detach(struct bpf_link *link) 9565 { 9566 bpf_xdp_link_release(link); 9567 return 0; 9568 } 9569 9570 static void bpf_xdp_link_dealloc(struct bpf_link *link) 9571 { 9572 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9573 9574 kfree(xdp_link); 9575 } 9576 9577 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, 9578 struct seq_file *seq) 9579 { 9580 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9581 u32 ifindex = 0; 9582 9583 rtnl_lock(); 9584 if (xdp_link->dev) 9585 ifindex = xdp_link->dev->ifindex; 9586 rtnl_unlock(); 9587 9588 seq_printf(seq, "ifindex:\t%u\n", ifindex); 9589 } 9590 9591 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, 9592 struct bpf_link_info *info) 9593 { 9594 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9595 u32 ifindex = 0; 9596 9597 rtnl_lock(); 9598 if (xdp_link->dev) 9599 ifindex = xdp_link->dev->ifindex; 9600 rtnl_unlock(); 9601 9602 info->xdp.ifindex = ifindex; 9603 return 0; 9604 } 9605 9606 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, 9607 struct bpf_prog *old_prog) 9608 { 9609 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); 9610 enum bpf_xdp_mode mode; 9611 bpf_op_t bpf_op; 9612 int err = 0; 9613 9614 rtnl_lock(); 9615 9616 /* link might have been auto-released already, so fail */ 9617 if (!xdp_link->dev) { 9618 err = -ENOLINK; 9619 goto out_unlock; 9620 } 9621 9622 if (old_prog && link->prog != old_prog) { 9623 err = -EPERM; 9624 goto out_unlock; 9625 } 9626 old_prog = link->prog; 9627 if (old_prog == new_prog) { 9628 /* no-op, don't disturb drivers */ 9629 bpf_prog_put(new_prog); 9630 goto out_unlock; 9631 } 9632 9633 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); 9634 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); 9635 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, 9636 xdp_link->flags, new_prog); 9637 if (err) 9638 goto out_unlock; 9639 9640 old_prog = xchg(&link->prog, new_prog); 9641 bpf_prog_put(old_prog); 9642 9643 out_unlock: 9644 rtnl_unlock(); 9645 return err; 9646 } 9647 9648 static const struct bpf_link_ops bpf_xdp_link_lops = { 9649 .release = bpf_xdp_link_release, 9650 .dealloc = bpf_xdp_link_dealloc, 9651 .detach = bpf_xdp_link_detach, 9652 .show_fdinfo = bpf_xdp_link_show_fdinfo, 9653 .fill_link_info = bpf_xdp_link_fill_link_info, 9654 .update_prog = bpf_xdp_link_update, 9655 }; 9656 9657 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 9658 { 9659 struct net *net = current->nsproxy->net_ns; 9660 struct bpf_link_primer link_primer; 9661 struct bpf_xdp_link *link; 9662 struct net_device *dev; 9663 int err, fd; 9664 9665 rtnl_lock(); 9666 dev = dev_get_by_index(net, attr->link_create.target_ifindex); 9667 if (!dev) { 9668 rtnl_unlock(); 9669 return -EINVAL; 9670 } 9671 9672 link = kzalloc(sizeof(*link), GFP_USER); 9673 if (!link) { 9674 err = -ENOMEM; 9675 goto unlock; 9676 } 9677 9678 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 9679 link->dev = dev; 9680 link->flags = attr->link_create.flags; 9681 9682 err = bpf_link_prime(&link->link, &link_primer); 9683 if (err) { 9684 kfree(link); 9685 goto unlock; 9686 } 9687 9688 err = dev_xdp_attach_link(dev, NULL, link); 9689 rtnl_unlock(); 9690 9691 if (err) { 9692 link->dev = NULL; 9693 bpf_link_cleanup(&link_primer); 9694 goto out_put_dev; 9695 } 9696 9697 fd = bpf_link_settle(&link_primer); 9698 /* link itself doesn't hold dev's refcnt to not complicate shutdown */ 9699 dev_put(dev); 9700 return fd; 9701 9702 unlock: 9703 rtnl_unlock(); 9704 9705 out_put_dev: 9706 dev_put(dev); 9707 return err; 9708 } 9709 9710 /** 9711 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 9712 * @dev: device 9713 * @extack: netlink extended ack 9714 * @fd: new program fd or negative value to clear 9715 * @expected_fd: old program fd that userspace expects to replace or clear 9716 * @flags: xdp-related flags 9717 * 9718 * Set or clear a bpf program for a device 9719 */ 9720 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 9721 int fd, int expected_fd, u32 flags) 9722 { 9723 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); 9724 struct bpf_prog *new_prog = NULL, *old_prog = NULL; 9725 int err; 9726 9727 ASSERT_RTNL(); 9728 9729 if (fd >= 0) { 9730 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 9731 mode != XDP_MODE_SKB); 9732 if (IS_ERR(new_prog)) 9733 return PTR_ERR(new_prog); 9734 } 9735 9736 if (expected_fd >= 0) { 9737 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, 9738 mode != XDP_MODE_SKB); 9739 if (IS_ERR(old_prog)) { 9740 err = PTR_ERR(old_prog); 9741 old_prog = NULL; 9742 goto err_out; 9743 } 9744 } 9745 9746 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); 9747 9748 err_out: 9749 if (err && new_prog) 9750 bpf_prog_put(new_prog); 9751 if (old_prog) 9752 bpf_prog_put(old_prog); 9753 return err; 9754 } 9755 9756 /** 9757 * dev_new_index - allocate an ifindex 9758 * @net: the applicable net namespace 9759 * 9760 * Returns a suitable unique value for a new device interface 9761 * number. The caller must hold the rtnl semaphore or the 9762 * dev_base_lock to be sure it remains unique. 9763 */ 9764 static int dev_new_index(struct net *net) 9765 { 9766 int ifindex = net->ifindex; 9767 9768 for (;;) { 9769 if (++ifindex <= 0) 9770 ifindex = 1; 9771 if (!__dev_get_by_index(net, ifindex)) 9772 return net->ifindex = ifindex; 9773 } 9774 } 9775 9776 /* Delayed registration/unregisteration */ 9777 static LIST_HEAD(net_todo_list); 9778 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 9779 9780 static void net_set_todo(struct net_device *dev) 9781 { 9782 list_add_tail(&dev->todo_list, &net_todo_list); 9783 dev_net(dev)->dev_unreg_count++; 9784 } 9785 9786 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 9787 struct net_device *upper, netdev_features_t features) 9788 { 9789 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9790 netdev_features_t feature; 9791 int feature_bit; 9792 9793 for_each_netdev_feature(upper_disables, feature_bit) { 9794 feature = __NETIF_F_BIT(feature_bit); 9795 if (!(upper->wanted_features & feature) 9796 && (features & feature)) { 9797 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 9798 &feature, upper->name); 9799 features &= ~feature; 9800 } 9801 } 9802 9803 return features; 9804 } 9805 9806 static void netdev_sync_lower_features(struct net_device *upper, 9807 struct net_device *lower, netdev_features_t features) 9808 { 9809 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 9810 netdev_features_t feature; 9811 int feature_bit; 9812 9813 for_each_netdev_feature(upper_disables, feature_bit) { 9814 feature = __NETIF_F_BIT(feature_bit); 9815 if (!(features & feature) && (lower->features & feature)) { 9816 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 9817 &feature, lower->name); 9818 lower->wanted_features &= ~feature; 9819 __netdev_update_features(lower); 9820 9821 if (unlikely(lower->features & feature)) 9822 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 9823 &feature, lower->name); 9824 else 9825 netdev_features_change(lower); 9826 } 9827 } 9828 } 9829 9830 static netdev_features_t netdev_fix_features(struct net_device *dev, 9831 netdev_features_t features) 9832 { 9833 /* Fix illegal checksum combinations */ 9834 if ((features & NETIF_F_HW_CSUM) && 9835 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 9836 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 9837 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 9838 } 9839 9840 /* TSO requires that SG is present as well. */ 9841 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 9842 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 9843 features &= ~NETIF_F_ALL_TSO; 9844 } 9845 9846 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 9847 !(features & NETIF_F_IP_CSUM)) { 9848 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 9849 features &= ~NETIF_F_TSO; 9850 features &= ~NETIF_F_TSO_ECN; 9851 } 9852 9853 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 9854 !(features & NETIF_F_IPV6_CSUM)) { 9855 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 9856 features &= ~NETIF_F_TSO6; 9857 } 9858 9859 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 9860 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 9861 features &= ~NETIF_F_TSO_MANGLEID; 9862 9863 /* TSO ECN requires that TSO is present as well. */ 9864 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 9865 features &= ~NETIF_F_TSO_ECN; 9866 9867 /* Software GSO depends on SG. */ 9868 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 9869 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 9870 features &= ~NETIF_F_GSO; 9871 } 9872 9873 /* GSO partial features require GSO partial be set */ 9874 if ((features & dev->gso_partial_features) && 9875 !(features & NETIF_F_GSO_PARTIAL)) { 9876 netdev_dbg(dev, 9877 "Dropping partially supported GSO features since no GSO partial.\n"); 9878 features &= ~dev->gso_partial_features; 9879 } 9880 9881 if (!(features & NETIF_F_RXCSUM)) { 9882 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 9883 * successfully merged by hardware must also have the 9884 * checksum verified by hardware. If the user does not 9885 * want to enable RXCSUM, logically, we should disable GRO_HW. 9886 */ 9887 if (features & NETIF_F_GRO_HW) { 9888 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 9889 features &= ~NETIF_F_GRO_HW; 9890 } 9891 } 9892 9893 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 9894 if (features & NETIF_F_RXFCS) { 9895 if (features & NETIF_F_LRO) { 9896 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 9897 features &= ~NETIF_F_LRO; 9898 } 9899 9900 if (features & NETIF_F_GRO_HW) { 9901 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 9902 features &= ~NETIF_F_GRO_HW; 9903 } 9904 } 9905 9906 if (features & NETIF_F_HW_TLS_TX) { 9907 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == 9908 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 9909 bool hw_csum = features & NETIF_F_HW_CSUM; 9910 9911 if (!ip_csum && !hw_csum) { 9912 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n"); 9913 features &= ~NETIF_F_HW_TLS_TX; 9914 } 9915 } 9916 9917 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) { 9918 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n"); 9919 features &= ~NETIF_F_HW_TLS_RX; 9920 } 9921 9922 return features; 9923 } 9924 9925 int __netdev_update_features(struct net_device *dev) 9926 { 9927 struct net_device *upper, *lower; 9928 netdev_features_t features; 9929 struct list_head *iter; 9930 int err = -1; 9931 9932 ASSERT_RTNL(); 9933 9934 features = netdev_get_wanted_features(dev); 9935 9936 if (dev->netdev_ops->ndo_fix_features) 9937 features = dev->netdev_ops->ndo_fix_features(dev, features); 9938 9939 /* driver might be less strict about feature dependencies */ 9940 features = netdev_fix_features(dev, features); 9941 9942 /* some features can't be enabled if they're off on an upper device */ 9943 netdev_for_each_upper_dev_rcu(dev, upper, iter) 9944 features = netdev_sync_upper_features(dev, upper, features); 9945 9946 if (dev->features == features) 9947 goto sync_lower; 9948 9949 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9950 &dev->features, &features); 9951 9952 if (dev->netdev_ops->ndo_set_features) 9953 err = dev->netdev_ops->ndo_set_features(dev, features); 9954 else 9955 err = 0; 9956 9957 if (unlikely(err < 0)) { 9958 netdev_err(dev, 9959 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9960 err, &features, &dev->features); 9961 /* return non-0 since some features might have changed and 9962 * it's better to fire a spurious notification than miss it 9963 */ 9964 return -1; 9965 } 9966 9967 sync_lower: 9968 /* some features must be disabled on lower devices when disabled 9969 * on an upper device (think: bonding master or bridge) 9970 */ 9971 netdev_for_each_lower_dev(dev, lower, iter) 9972 netdev_sync_lower_features(dev, lower, features); 9973 9974 if (!err) { 9975 netdev_features_t diff = features ^ dev->features; 9976 9977 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9978 /* udp_tunnel_{get,drop}_rx_info both need 9979 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9980 * device, or they won't do anything. 9981 * Thus we need to update dev->features 9982 * *before* calling udp_tunnel_get_rx_info, 9983 * but *after* calling udp_tunnel_drop_rx_info. 9984 */ 9985 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9986 dev->features = features; 9987 udp_tunnel_get_rx_info(dev); 9988 } else { 9989 udp_tunnel_drop_rx_info(dev); 9990 } 9991 } 9992 9993 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9994 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9995 dev->features = features; 9996 err |= vlan_get_rx_ctag_filter_info(dev); 9997 } else { 9998 vlan_drop_rx_ctag_filter_info(dev); 9999 } 10000 } 10001 10002 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 10003 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 10004 dev->features = features; 10005 err |= vlan_get_rx_stag_filter_info(dev); 10006 } else { 10007 vlan_drop_rx_stag_filter_info(dev); 10008 } 10009 } 10010 10011 dev->features = features; 10012 } 10013 10014 return err < 0 ? 0 : 1; 10015 } 10016 10017 /** 10018 * netdev_update_features - recalculate device features 10019 * @dev: the device to check 10020 * 10021 * Recalculate dev->features set and send notifications if it 10022 * has changed. Should be called after driver or hardware dependent 10023 * conditions might have changed that influence the features. 10024 */ 10025 void netdev_update_features(struct net_device *dev) 10026 { 10027 if (__netdev_update_features(dev)) 10028 netdev_features_change(dev); 10029 } 10030 EXPORT_SYMBOL(netdev_update_features); 10031 10032 /** 10033 * netdev_change_features - recalculate device features 10034 * @dev: the device to check 10035 * 10036 * Recalculate dev->features set and send notifications even 10037 * if they have not changed. Should be called instead of 10038 * netdev_update_features() if also dev->vlan_features might 10039 * have changed to allow the changes to be propagated to stacked 10040 * VLAN devices. 10041 */ 10042 void netdev_change_features(struct net_device *dev) 10043 { 10044 __netdev_update_features(dev); 10045 netdev_features_change(dev); 10046 } 10047 EXPORT_SYMBOL(netdev_change_features); 10048 10049 /** 10050 * netif_stacked_transfer_operstate - transfer operstate 10051 * @rootdev: the root or lower level device to transfer state from 10052 * @dev: the device to transfer operstate to 10053 * 10054 * Transfer operational state from root to device. This is normally 10055 * called when a stacking relationship exists between the root 10056 * device and the device(a leaf device). 10057 */ 10058 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 10059 struct net_device *dev) 10060 { 10061 if (rootdev->operstate == IF_OPER_DORMANT) 10062 netif_dormant_on(dev); 10063 else 10064 netif_dormant_off(dev); 10065 10066 if (rootdev->operstate == IF_OPER_TESTING) 10067 netif_testing_on(dev); 10068 else 10069 netif_testing_off(dev); 10070 10071 if (netif_carrier_ok(rootdev)) 10072 netif_carrier_on(dev); 10073 else 10074 netif_carrier_off(dev); 10075 } 10076 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 10077 10078 static int netif_alloc_rx_queues(struct net_device *dev) 10079 { 10080 unsigned int i, count = dev->num_rx_queues; 10081 struct netdev_rx_queue *rx; 10082 size_t sz = count * sizeof(*rx); 10083 int err = 0; 10084 10085 BUG_ON(count < 1); 10086 10087 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10088 if (!rx) 10089 return -ENOMEM; 10090 10091 dev->_rx = rx; 10092 10093 for (i = 0; i < count; i++) { 10094 rx[i].dev = dev; 10095 10096 /* XDP RX-queue setup */ 10097 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0); 10098 if (err < 0) 10099 goto err_rxq_info; 10100 } 10101 return 0; 10102 10103 err_rxq_info: 10104 /* Rollback successful reg's and free other resources */ 10105 while (i--) 10106 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 10107 kvfree(dev->_rx); 10108 dev->_rx = NULL; 10109 return err; 10110 } 10111 10112 static void netif_free_rx_queues(struct net_device *dev) 10113 { 10114 unsigned int i, count = dev->num_rx_queues; 10115 10116 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 10117 if (!dev->_rx) 10118 return; 10119 10120 for (i = 0; i < count; i++) 10121 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 10122 10123 kvfree(dev->_rx); 10124 } 10125 10126 static void netdev_init_one_queue(struct net_device *dev, 10127 struct netdev_queue *queue, void *_unused) 10128 { 10129 /* Initialize queue lock */ 10130 spin_lock_init(&queue->_xmit_lock); 10131 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 10132 queue->xmit_lock_owner = -1; 10133 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 10134 queue->dev = dev; 10135 #ifdef CONFIG_BQL 10136 dql_init(&queue->dql, HZ); 10137 #endif 10138 } 10139 10140 static void netif_free_tx_queues(struct net_device *dev) 10141 { 10142 kvfree(dev->_tx); 10143 } 10144 10145 static int netif_alloc_netdev_queues(struct net_device *dev) 10146 { 10147 unsigned int count = dev->num_tx_queues; 10148 struct netdev_queue *tx; 10149 size_t sz = count * sizeof(*tx); 10150 10151 if (count < 1 || count > 0xffff) 10152 return -EINVAL; 10153 10154 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10155 if (!tx) 10156 return -ENOMEM; 10157 10158 dev->_tx = tx; 10159 10160 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 10161 spin_lock_init(&dev->tx_global_lock); 10162 10163 return 0; 10164 } 10165 10166 void netif_tx_stop_all_queues(struct net_device *dev) 10167 { 10168 unsigned int i; 10169 10170 for (i = 0; i < dev->num_tx_queues; i++) { 10171 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 10172 10173 netif_tx_stop_queue(txq); 10174 } 10175 } 10176 EXPORT_SYMBOL(netif_tx_stop_all_queues); 10177 10178 /** 10179 * register_netdevice - register a network device 10180 * @dev: device to register 10181 * 10182 * Take a completed network device structure and add it to the kernel 10183 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10184 * chain. 0 is returned on success. A negative errno code is returned 10185 * on a failure to set up the device, or if the name is a duplicate. 10186 * 10187 * Callers must hold the rtnl semaphore. You may want 10188 * register_netdev() instead of this. 10189 * 10190 * BUGS: 10191 * The locking appears insufficient to guarantee two parallel registers 10192 * will not get the same name. 10193 */ 10194 10195 int register_netdevice(struct net_device *dev) 10196 { 10197 int ret; 10198 struct net *net = dev_net(dev); 10199 10200 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 10201 NETDEV_FEATURE_COUNT); 10202 BUG_ON(dev_boot_phase); 10203 ASSERT_RTNL(); 10204 10205 might_sleep(); 10206 10207 /* When net_device's are persistent, this will be fatal. */ 10208 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 10209 BUG_ON(!net); 10210 10211 ret = ethtool_check_ops(dev->ethtool_ops); 10212 if (ret) 10213 return ret; 10214 10215 spin_lock_init(&dev->addr_list_lock); 10216 netdev_set_addr_lockdep_class(dev); 10217 10218 ret = dev_get_valid_name(net, dev, dev->name); 10219 if (ret < 0) 10220 goto out; 10221 10222 ret = -ENOMEM; 10223 dev->name_node = netdev_name_node_head_alloc(dev); 10224 if (!dev->name_node) 10225 goto out; 10226 10227 /* Init, if this function is available */ 10228 if (dev->netdev_ops->ndo_init) { 10229 ret = dev->netdev_ops->ndo_init(dev); 10230 if (ret) { 10231 if (ret > 0) 10232 ret = -EIO; 10233 goto err_free_name; 10234 } 10235 } 10236 10237 if (((dev->hw_features | dev->features) & 10238 NETIF_F_HW_VLAN_CTAG_FILTER) && 10239 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 10240 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 10241 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 10242 ret = -EINVAL; 10243 goto err_uninit; 10244 } 10245 10246 ret = -EBUSY; 10247 if (!dev->ifindex) 10248 dev->ifindex = dev_new_index(net); 10249 else if (__dev_get_by_index(net, dev->ifindex)) 10250 goto err_uninit; 10251 10252 /* Transfer changeable features to wanted_features and enable 10253 * software offloads (GSO and GRO). 10254 */ 10255 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 10256 dev->features |= NETIF_F_SOFT_FEATURES; 10257 10258 if (dev->udp_tunnel_nic_info) { 10259 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10260 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 10261 } 10262 10263 dev->wanted_features = dev->features & dev->hw_features; 10264 10265 if (!(dev->flags & IFF_LOOPBACK)) 10266 dev->hw_features |= NETIF_F_NOCACHE_COPY; 10267 10268 /* If IPv4 TCP segmentation offload is supported we should also 10269 * allow the device to enable segmenting the frame with the option 10270 * of ignoring a static IP ID value. This doesn't enable the 10271 * feature itself but allows the user to enable it later. 10272 */ 10273 if (dev->hw_features & NETIF_F_TSO) 10274 dev->hw_features |= NETIF_F_TSO_MANGLEID; 10275 if (dev->vlan_features & NETIF_F_TSO) 10276 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 10277 if (dev->mpls_features & NETIF_F_TSO) 10278 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 10279 if (dev->hw_enc_features & NETIF_F_TSO) 10280 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 10281 10282 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 10283 */ 10284 dev->vlan_features |= NETIF_F_HIGHDMA; 10285 10286 /* Make NETIF_F_SG inheritable to tunnel devices. 10287 */ 10288 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 10289 10290 /* Make NETIF_F_SG inheritable to MPLS. 10291 */ 10292 dev->mpls_features |= NETIF_F_SG; 10293 10294 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 10295 ret = notifier_to_errno(ret); 10296 if (ret) 10297 goto err_uninit; 10298 10299 ret = netdev_register_kobject(dev); 10300 if (ret) { 10301 dev->reg_state = NETREG_UNREGISTERED; 10302 goto err_uninit; 10303 } 10304 dev->reg_state = NETREG_REGISTERED; 10305 10306 __netdev_update_features(dev); 10307 10308 /* 10309 * Default initial state at registry is that the 10310 * device is present. 10311 */ 10312 10313 set_bit(__LINK_STATE_PRESENT, &dev->state); 10314 10315 linkwatch_init_dev(dev); 10316 10317 dev_init_scheduler(dev); 10318 dev_hold(dev); 10319 list_netdevice(dev); 10320 add_device_randomness(dev->dev_addr, dev->addr_len); 10321 10322 /* If the device has permanent device address, driver should 10323 * set dev_addr and also addr_assign_type should be set to 10324 * NET_ADDR_PERM (default value). 10325 */ 10326 if (dev->addr_assign_type == NET_ADDR_PERM) 10327 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 10328 10329 /* Notify protocols, that a new device appeared. */ 10330 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 10331 ret = notifier_to_errno(ret); 10332 if (ret) { 10333 /* Expect explicit free_netdev() on failure */ 10334 dev->needs_free_netdev = false; 10335 unregister_netdevice_queue(dev, NULL); 10336 goto out; 10337 } 10338 /* 10339 * Prevent userspace races by waiting until the network 10340 * device is fully setup before sending notifications. 10341 */ 10342 if (!dev->rtnl_link_ops || 10343 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 10344 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 10345 10346 out: 10347 return ret; 10348 10349 err_uninit: 10350 if (dev->netdev_ops->ndo_uninit) 10351 dev->netdev_ops->ndo_uninit(dev); 10352 if (dev->priv_destructor) 10353 dev->priv_destructor(dev); 10354 err_free_name: 10355 netdev_name_node_free(dev->name_node); 10356 goto out; 10357 } 10358 EXPORT_SYMBOL(register_netdevice); 10359 10360 /** 10361 * init_dummy_netdev - init a dummy network device for NAPI 10362 * @dev: device to init 10363 * 10364 * This takes a network device structure and initialize the minimum 10365 * amount of fields so it can be used to schedule NAPI polls without 10366 * registering a full blown interface. This is to be used by drivers 10367 * that need to tie several hardware interfaces to a single NAPI 10368 * poll scheduler due to HW limitations. 10369 */ 10370 int init_dummy_netdev(struct net_device *dev) 10371 { 10372 /* Clear everything. Note we don't initialize spinlocks 10373 * are they aren't supposed to be taken by any of the 10374 * NAPI code and this dummy netdev is supposed to be 10375 * only ever used for NAPI polls 10376 */ 10377 memset(dev, 0, sizeof(struct net_device)); 10378 10379 /* make sure we BUG if trying to hit standard 10380 * register/unregister code path 10381 */ 10382 dev->reg_state = NETREG_DUMMY; 10383 10384 /* NAPI wants this */ 10385 INIT_LIST_HEAD(&dev->napi_list); 10386 10387 /* a dummy interface is started by default */ 10388 set_bit(__LINK_STATE_PRESENT, &dev->state); 10389 set_bit(__LINK_STATE_START, &dev->state); 10390 10391 /* napi_busy_loop stats accounting wants this */ 10392 dev_net_set(dev, &init_net); 10393 10394 /* Note : We dont allocate pcpu_refcnt for dummy devices, 10395 * because users of this 'device' dont need to change 10396 * its refcount. 10397 */ 10398 10399 return 0; 10400 } 10401 EXPORT_SYMBOL_GPL(init_dummy_netdev); 10402 10403 10404 /** 10405 * register_netdev - register a network device 10406 * @dev: device to register 10407 * 10408 * Take a completed network device structure and add it to the kernel 10409 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 10410 * chain. 0 is returned on success. A negative errno code is returned 10411 * on a failure to set up the device, or if the name is a duplicate. 10412 * 10413 * This is a wrapper around register_netdevice that takes the rtnl semaphore 10414 * and expands the device name if you passed a format string to 10415 * alloc_netdev. 10416 */ 10417 int register_netdev(struct net_device *dev) 10418 { 10419 int err; 10420 10421 if (rtnl_lock_killable()) 10422 return -EINTR; 10423 err = register_netdevice(dev); 10424 rtnl_unlock(); 10425 return err; 10426 } 10427 EXPORT_SYMBOL(register_netdev); 10428 10429 int netdev_refcnt_read(const struct net_device *dev) 10430 { 10431 #ifdef CONFIG_PCPU_DEV_REFCNT 10432 int i, refcnt = 0; 10433 10434 for_each_possible_cpu(i) 10435 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 10436 return refcnt; 10437 #else 10438 return refcount_read(&dev->dev_refcnt); 10439 #endif 10440 } 10441 EXPORT_SYMBOL(netdev_refcnt_read); 10442 10443 int netdev_unregister_timeout_secs __read_mostly = 10; 10444 10445 #define WAIT_REFS_MIN_MSECS 1 10446 #define WAIT_REFS_MAX_MSECS 250 10447 /** 10448 * netdev_wait_allrefs - wait until all references are gone. 10449 * @dev: target net_device 10450 * 10451 * This is called when unregistering network devices. 10452 * 10453 * Any protocol or device that holds a reference should register 10454 * for netdevice notification, and cleanup and put back the 10455 * reference if they receive an UNREGISTER event. 10456 * We can get stuck here if buggy protocols don't correctly 10457 * call dev_put. 10458 */ 10459 static void netdev_wait_allrefs(struct net_device *dev) 10460 { 10461 unsigned long rebroadcast_time, warning_time; 10462 int wait = 0, refcnt; 10463 10464 linkwatch_forget_dev(dev); 10465 10466 rebroadcast_time = warning_time = jiffies; 10467 refcnt = netdev_refcnt_read(dev); 10468 10469 while (refcnt != 1) { 10470 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 10471 rtnl_lock(); 10472 10473 /* Rebroadcast unregister notification */ 10474 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10475 10476 __rtnl_unlock(); 10477 rcu_barrier(); 10478 rtnl_lock(); 10479 10480 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 10481 &dev->state)) { 10482 /* We must not have linkwatch events 10483 * pending on unregister. If this 10484 * happens, we simply run the queue 10485 * unscheduled, resulting in a noop 10486 * for this device. 10487 */ 10488 linkwatch_run_queue(); 10489 } 10490 10491 __rtnl_unlock(); 10492 10493 rebroadcast_time = jiffies; 10494 } 10495 10496 if (!wait) { 10497 rcu_barrier(); 10498 wait = WAIT_REFS_MIN_MSECS; 10499 } else { 10500 msleep(wait); 10501 wait = min(wait << 1, WAIT_REFS_MAX_MSECS); 10502 } 10503 10504 refcnt = netdev_refcnt_read(dev); 10505 10506 if (refcnt != 1 && 10507 time_after(jiffies, warning_time + 10508 netdev_unregister_timeout_secs * HZ)) { 10509 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 10510 dev->name, refcnt); 10511 warning_time = jiffies; 10512 } 10513 } 10514 } 10515 10516 /* The sequence is: 10517 * 10518 * rtnl_lock(); 10519 * ... 10520 * register_netdevice(x1); 10521 * register_netdevice(x2); 10522 * ... 10523 * unregister_netdevice(y1); 10524 * unregister_netdevice(y2); 10525 * ... 10526 * rtnl_unlock(); 10527 * free_netdev(y1); 10528 * free_netdev(y2); 10529 * 10530 * We are invoked by rtnl_unlock(). 10531 * This allows us to deal with problems: 10532 * 1) We can delete sysfs objects which invoke hotplug 10533 * without deadlocking with linkwatch via keventd. 10534 * 2) Since we run with the RTNL semaphore not held, we can sleep 10535 * safely in order to wait for the netdev refcnt to drop to zero. 10536 * 10537 * We must not return until all unregister events added during 10538 * the interval the lock was held have been completed. 10539 */ 10540 void netdev_run_todo(void) 10541 { 10542 struct list_head list; 10543 #ifdef CONFIG_LOCKDEP 10544 struct list_head unlink_list; 10545 10546 list_replace_init(&net_unlink_list, &unlink_list); 10547 10548 while (!list_empty(&unlink_list)) { 10549 struct net_device *dev = list_first_entry(&unlink_list, 10550 struct net_device, 10551 unlink_list); 10552 list_del_init(&dev->unlink_list); 10553 dev->nested_level = dev->lower_level - 1; 10554 } 10555 #endif 10556 10557 /* Snapshot list, allow later requests */ 10558 list_replace_init(&net_todo_list, &list); 10559 10560 __rtnl_unlock(); 10561 10562 10563 /* Wait for rcu callbacks to finish before next phase */ 10564 if (!list_empty(&list)) 10565 rcu_barrier(); 10566 10567 while (!list_empty(&list)) { 10568 struct net_device *dev 10569 = list_first_entry(&list, struct net_device, todo_list); 10570 list_del(&dev->todo_list); 10571 10572 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 10573 pr_err("network todo '%s' but state %d\n", 10574 dev->name, dev->reg_state); 10575 dump_stack(); 10576 continue; 10577 } 10578 10579 dev->reg_state = NETREG_UNREGISTERED; 10580 10581 netdev_wait_allrefs(dev); 10582 10583 /* paranoia */ 10584 BUG_ON(netdev_refcnt_read(dev) != 1); 10585 BUG_ON(!list_empty(&dev->ptype_all)); 10586 BUG_ON(!list_empty(&dev->ptype_specific)); 10587 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 10588 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 10589 #if IS_ENABLED(CONFIG_DECNET) 10590 WARN_ON(dev->dn_ptr); 10591 #endif 10592 if (dev->priv_destructor) 10593 dev->priv_destructor(dev); 10594 if (dev->needs_free_netdev) 10595 free_netdev(dev); 10596 10597 /* Report a network device has been unregistered */ 10598 rtnl_lock(); 10599 dev_net(dev)->dev_unreg_count--; 10600 __rtnl_unlock(); 10601 wake_up(&netdev_unregistering_wq); 10602 10603 /* Free network device */ 10604 kobject_put(&dev->dev.kobj); 10605 } 10606 } 10607 10608 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 10609 * all the same fields in the same order as net_device_stats, with only 10610 * the type differing, but rtnl_link_stats64 may have additional fields 10611 * at the end for newer counters. 10612 */ 10613 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 10614 const struct net_device_stats *netdev_stats) 10615 { 10616 #if BITS_PER_LONG == 64 10617 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 10618 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 10619 /* zero out counters that only exist in rtnl_link_stats64 */ 10620 memset((char *)stats64 + sizeof(*netdev_stats), 0, 10621 sizeof(*stats64) - sizeof(*netdev_stats)); 10622 #else 10623 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 10624 const unsigned long *src = (const unsigned long *)netdev_stats; 10625 u64 *dst = (u64 *)stats64; 10626 10627 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 10628 for (i = 0; i < n; i++) 10629 dst[i] = src[i]; 10630 /* zero out counters that only exist in rtnl_link_stats64 */ 10631 memset((char *)stats64 + n * sizeof(u64), 0, 10632 sizeof(*stats64) - n * sizeof(u64)); 10633 #endif 10634 } 10635 EXPORT_SYMBOL(netdev_stats_to_stats64); 10636 10637 /** 10638 * dev_get_stats - get network device statistics 10639 * @dev: device to get statistics from 10640 * @storage: place to store stats 10641 * 10642 * Get network statistics from device. Return @storage. 10643 * The device driver may provide its own method by setting 10644 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 10645 * otherwise the internal statistics structure is used. 10646 */ 10647 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 10648 struct rtnl_link_stats64 *storage) 10649 { 10650 const struct net_device_ops *ops = dev->netdev_ops; 10651 10652 if (ops->ndo_get_stats64) { 10653 memset(storage, 0, sizeof(*storage)); 10654 ops->ndo_get_stats64(dev, storage); 10655 } else if (ops->ndo_get_stats) { 10656 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 10657 } else { 10658 netdev_stats_to_stats64(storage, &dev->stats); 10659 } 10660 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 10661 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 10662 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 10663 return storage; 10664 } 10665 EXPORT_SYMBOL(dev_get_stats); 10666 10667 /** 10668 * dev_fetch_sw_netstats - get per-cpu network device statistics 10669 * @s: place to store stats 10670 * @netstats: per-cpu network stats to read from 10671 * 10672 * Read per-cpu network statistics and populate the related fields in @s. 10673 */ 10674 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, 10675 const struct pcpu_sw_netstats __percpu *netstats) 10676 { 10677 int cpu; 10678 10679 for_each_possible_cpu(cpu) { 10680 const struct pcpu_sw_netstats *stats; 10681 struct pcpu_sw_netstats tmp; 10682 unsigned int start; 10683 10684 stats = per_cpu_ptr(netstats, cpu); 10685 do { 10686 start = u64_stats_fetch_begin_irq(&stats->syncp); 10687 tmp.rx_packets = stats->rx_packets; 10688 tmp.rx_bytes = stats->rx_bytes; 10689 tmp.tx_packets = stats->tx_packets; 10690 tmp.tx_bytes = stats->tx_bytes; 10691 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 10692 10693 s->rx_packets += tmp.rx_packets; 10694 s->rx_bytes += tmp.rx_bytes; 10695 s->tx_packets += tmp.tx_packets; 10696 s->tx_bytes += tmp.tx_bytes; 10697 } 10698 } 10699 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats); 10700 10701 /** 10702 * dev_get_tstats64 - ndo_get_stats64 implementation 10703 * @dev: device to get statistics from 10704 * @s: place to store stats 10705 * 10706 * Populate @s from dev->stats and dev->tstats. Can be used as 10707 * ndo_get_stats64() callback. 10708 */ 10709 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s) 10710 { 10711 netdev_stats_to_stats64(s, &dev->stats); 10712 dev_fetch_sw_netstats(s, dev->tstats); 10713 } 10714 EXPORT_SYMBOL_GPL(dev_get_tstats64); 10715 10716 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 10717 { 10718 struct netdev_queue *queue = dev_ingress_queue(dev); 10719 10720 #ifdef CONFIG_NET_CLS_ACT 10721 if (queue) 10722 return queue; 10723 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 10724 if (!queue) 10725 return NULL; 10726 netdev_init_one_queue(dev, queue, NULL); 10727 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 10728 queue->qdisc_sleeping = &noop_qdisc; 10729 rcu_assign_pointer(dev->ingress_queue, queue); 10730 #endif 10731 return queue; 10732 } 10733 10734 static const struct ethtool_ops default_ethtool_ops; 10735 10736 void netdev_set_default_ethtool_ops(struct net_device *dev, 10737 const struct ethtool_ops *ops) 10738 { 10739 if (dev->ethtool_ops == &default_ethtool_ops) 10740 dev->ethtool_ops = ops; 10741 } 10742 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 10743 10744 void netdev_freemem(struct net_device *dev) 10745 { 10746 char *addr = (char *)dev - dev->padded; 10747 10748 kvfree(addr); 10749 } 10750 10751 /** 10752 * alloc_netdev_mqs - allocate network device 10753 * @sizeof_priv: size of private data to allocate space for 10754 * @name: device name format string 10755 * @name_assign_type: origin of device name 10756 * @setup: callback to initialize device 10757 * @txqs: the number of TX subqueues to allocate 10758 * @rxqs: the number of RX subqueues to allocate 10759 * 10760 * Allocates a struct net_device with private data area for driver use 10761 * and performs basic initialization. Also allocates subqueue structs 10762 * for each queue on the device. 10763 */ 10764 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 10765 unsigned char name_assign_type, 10766 void (*setup)(struct net_device *), 10767 unsigned int txqs, unsigned int rxqs) 10768 { 10769 struct net_device *dev; 10770 unsigned int alloc_size; 10771 struct net_device *p; 10772 10773 BUG_ON(strlen(name) >= sizeof(dev->name)); 10774 10775 if (txqs < 1) { 10776 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 10777 return NULL; 10778 } 10779 10780 if (rxqs < 1) { 10781 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 10782 return NULL; 10783 } 10784 10785 alloc_size = sizeof(struct net_device); 10786 if (sizeof_priv) { 10787 /* ensure 32-byte alignment of private area */ 10788 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 10789 alloc_size += sizeof_priv; 10790 } 10791 /* ensure 32-byte alignment of whole construct */ 10792 alloc_size += NETDEV_ALIGN - 1; 10793 10794 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); 10795 if (!p) 10796 return NULL; 10797 10798 dev = PTR_ALIGN(p, NETDEV_ALIGN); 10799 dev->padded = (char *)dev - (char *)p; 10800 10801 #ifdef CONFIG_PCPU_DEV_REFCNT 10802 dev->pcpu_refcnt = alloc_percpu(int); 10803 if (!dev->pcpu_refcnt) 10804 goto free_dev; 10805 dev_hold(dev); 10806 #else 10807 refcount_set(&dev->dev_refcnt, 1); 10808 #endif 10809 10810 if (dev_addr_init(dev)) 10811 goto free_pcpu; 10812 10813 dev_mc_init(dev); 10814 dev_uc_init(dev); 10815 10816 dev_net_set(dev, &init_net); 10817 10818 dev->gso_max_size = GSO_MAX_SIZE; 10819 dev->gso_max_segs = GSO_MAX_SEGS; 10820 dev->upper_level = 1; 10821 dev->lower_level = 1; 10822 #ifdef CONFIG_LOCKDEP 10823 dev->nested_level = 0; 10824 INIT_LIST_HEAD(&dev->unlink_list); 10825 #endif 10826 10827 INIT_LIST_HEAD(&dev->napi_list); 10828 INIT_LIST_HEAD(&dev->unreg_list); 10829 INIT_LIST_HEAD(&dev->close_list); 10830 INIT_LIST_HEAD(&dev->link_watch_list); 10831 INIT_LIST_HEAD(&dev->adj_list.upper); 10832 INIT_LIST_HEAD(&dev->adj_list.lower); 10833 INIT_LIST_HEAD(&dev->ptype_all); 10834 INIT_LIST_HEAD(&dev->ptype_specific); 10835 INIT_LIST_HEAD(&dev->net_notifier_list); 10836 #ifdef CONFIG_NET_SCHED 10837 hash_init(dev->qdisc_hash); 10838 #endif 10839 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 10840 setup(dev); 10841 10842 if (!dev->tx_queue_len) { 10843 dev->priv_flags |= IFF_NO_QUEUE; 10844 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 10845 } 10846 10847 dev->num_tx_queues = txqs; 10848 dev->real_num_tx_queues = txqs; 10849 if (netif_alloc_netdev_queues(dev)) 10850 goto free_all; 10851 10852 dev->num_rx_queues = rxqs; 10853 dev->real_num_rx_queues = rxqs; 10854 if (netif_alloc_rx_queues(dev)) 10855 goto free_all; 10856 10857 strcpy(dev->name, name); 10858 dev->name_assign_type = name_assign_type; 10859 dev->group = INIT_NETDEV_GROUP; 10860 if (!dev->ethtool_ops) 10861 dev->ethtool_ops = &default_ethtool_ops; 10862 10863 nf_hook_ingress_init(dev); 10864 10865 return dev; 10866 10867 free_all: 10868 free_netdev(dev); 10869 return NULL; 10870 10871 free_pcpu: 10872 #ifdef CONFIG_PCPU_DEV_REFCNT 10873 free_percpu(dev->pcpu_refcnt); 10874 free_dev: 10875 #endif 10876 netdev_freemem(dev); 10877 return NULL; 10878 } 10879 EXPORT_SYMBOL(alloc_netdev_mqs); 10880 10881 /** 10882 * free_netdev - free network device 10883 * @dev: device 10884 * 10885 * This function does the last stage of destroying an allocated device 10886 * interface. The reference to the device object is released. If this 10887 * is the last reference then it will be freed.Must be called in process 10888 * context. 10889 */ 10890 void free_netdev(struct net_device *dev) 10891 { 10892 struct napi_struct *p, *n; 10893 10894 might_sleep(); 10895 10896 /* When called immediately after register_netdevice() failed the unwind 10897 * handling may still be dismantling the device. Handle that case by 10898 * deferring the free. 10899 */ 10900 if (dev->reg_state == NETREG_UNREGISTERING) { 10901 ASSERT_RTNL(); 10902 dev->needs_free_netdev = true; 10903 return; 10904 } 10905 10906 netif_free_tx_queues(dev); 10907 netif_free_rx_queues(dev); 10908 10909 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 10910 10911 /* Flush device addresses */ 10912 dev_addr_flush(dev); 10913 10914 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 10915 netif_napi_del(p); 10916 10917 #ifdef CONFIG_PCPU_DEV_REFCNT 10918 free_percpu(dev->pcpu_refcnt); 10919 dev->pcpu_refcnt = NULL; 10920 #endif 10921 free_percpu(dev->xdp_bulkq); 10922 dev->xdp_bulkq = NULL; 10923 10924 /* Compatibility with error handling in drivers */ 10925 if (dev->reg_state == NETREG_UNINITIALIZED) { 10926 netdev_freemem(dev); 10927 return; 10928 } 10929 10930 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 10931 dev->reg_state = NETREG_RELEASED; 10932 10933 /* will free via device release */ 10934 put_device(&dev->dev); 10935 } 10936 EXPORT_SYMBOL(free_netdev); 10937 10938 /** 10939 * synchronize_net - Synchronize with packet receive processing 10940 * 10941 * Wait for packets currently being received to be done. 10942 * Does not block later packets from starting. 10943 */ 10944 void synchronize_net(void) 10945 { 10946 might_sleep(); 10947 if (rtnl_is_locked()) 10948 synchronize_rcu_expedited(); 10949 else 10950 synchronize_rcu(); 10951 } 10952 EXPORT_SYMBOL(synchronize_net); 10953 10954 /** 10955 * unregister_netdevice_queue - remove device from the kernel 10956 * @dev: device 10957 * @head: list 10958 * 10959 * This function shuts down a device interface and removes it 10960 * from the kernel tables. 10961 * If head not NULL, device is queued to be unregistered later. 10962 * 10963 * Callers must hold the rtnl semaphore. You may want 10964 * unregister_netdev() instead of this. 10965 */ 10966 10967 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 10968 { 10969 ASSERT_RTNL(); 10970 10971 if (head) { 10972 list_move_tail(&dev->unreg_list, head); 10973 } else { 10974 LIST_HEAD(single); 10975 10976 list_add(&dev->unreg_list, &single); 10977 unregister_netdevice_many(&single); 10978 } 10979 } 10980 EXPORT_SYMBOL(unregister_netdevice_queue); 10981 10982 /** 10983 * unregister_netdevice_many - unregister many devices 10984 * @head: list of devices 10985 * 10986 * Note: As most callers use a stack allocated list_head, 10987 * we force a list_del() to make sure stack wont be corrupted later. 10988 */ 10989 void unregister_netdevice_many(struct list_head *head) 10990 { 10991 struct net_device *dev, *tmp; 10992 LIST_HEAD(close_head); 10993 10994 BUG_ON(dev_boot_phase); 10995 ASSERT_RTNL(); 10996 10997 if (list_empty(head)) 10998 return; 10999 11000 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 11001 /* Some devices call without registering 11002 * for initialization unwind. Remove those 11003 * devices and proceed with the remaining. 11004 */ 11005 if (dev->reg_state == NETREG_UNINITIALIZED) { 11006 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 11007 dev->name, dev); 11008 11009 WARN_ON(1); 11010 list_del(&dev->unreg_list); 11011 continue; 11012 } 11013 dev->dismantle = true; 11014 BUG_ON(dev->reg_state != NETREG_REGISTERED); 11015 } 11016 11017 /* If device is running, close it first. */ 11018 list_for_each_entry(dev, head, unreg_list) 11019 list_add_tail(&dev->close_list, &close_head); 11020 dev_close_many(&close_head, true); 11021 11022 list_for_each_entry(dev, head, unreg_list) { 11023 /* And unlink it from device chain. */ 11024 unlist_netdevice(dev); 11025 11026 dev->reg_state = NETREG_UNREGISTERING; 11027 } 11028 flush_all_backlogs(); 11029 11030 synchronize_net(); 11031 11032 list_for_each_entry(dev, head, unreg_list) { 11033 struct sk_buff *skb = NULL; 11034 11035 /* Shutdown queueing discipline. */ 11036 dev_shutdown(dev); 11037 11038 dev_xdp_uninstall(dev); 11039 11040 /* Notify protocols, that we are about to destroy 11041 * this device. They should clean all the things. 11042 */ 11043 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11044 11045 if (!dev->rtnl_link_ops || 11046 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 11047 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 11048 GFP_KERNEL, NULL, 0); 11049 11050 /* 11051 * Flush the unicast and multicast chains 11052 */ 11053 dev_uc_flush(dev); 11054 dev_mc_flush(dev); 11055 11056 netdev_name_node_alt_flush(dev); 11057 netdev_name_node_free(dev->name_node); 11058 11059 if (dev->netdev_ops->ndo_uninit) 11060 dev->netdev_ops->ndo_uninit(dev); 11061 11062 if (skb) 11063 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); 11064 11065 /* Notifier chain MUST detach us all upper devices. */ 11066 WARN_ON(netdev_has_any_upper_dev(dev)); 11067 WARN_ON(netdev_has_any_lower_dev(dev)); 11068 11069 /* Remove entries from kobject tree */ 11070 netdev_unregister_kobject(dev); 11071 #ifdef CONFIG_XPS 11072 /* Remove XPS queueing entries */ 11073 netif_reset_xps_queues_gt(dev, 0); 11074 #endif 11075 } 11076 11077 synchronize_net(); 11078 11079 list_for_each_entry(dev, head, unreg_list) { 11080 dev_put(dev); 11081 net_set_todo(dev); 11082 } 11083 11084 list_del(head); 11085 } 11086 EXPORT_SYMBOL(unregister_netdevice_many); 11087 11088 /** 11089 * unregister_netdev - remove device from the kernel 11090 * @dev: device 11091 * 11092 * This function shuts down a device interface and removes it 11093 * from the kernel tables. 11094 * 11095 * This is just a wrapper for unregister_netdevice that takes 11096 * the rtnl semaphore. In general you want to use this and not 11097 * unregister_netdevice. 11098 */ 11099 void unregister_netdev(struct net_device *dev) 11100 { 11101 rtnl_lock(); 11102 unregister_netdevice(dev); 11103 rtnl_unlock(); 11104 } 11105 EXPORT_SYMBOL(unregister_netdev); 11106 11107 /** 11108 * __dev_change_net_namespace - move device to different nethost namespace 11109 * @dev: device 11110 * @net: network namespace 11111 * @pat: If not NULL name pattern to try if the current device name 11112 * is already taken in the destination network namespace. 11113 * @new_ifindex: If not zero, specifies device index in the target 11114 * namespace. 11115 * 11116 * This function shuts down a device interface and moves it 11117 * to a new network namespace. On success 0 is returned, on 11118 * a failure a netagive errno code is returned. 11119 * 11120 * Callers must hold the rtnl semaphore. 11121 */ 11122 11123 int __dev_change_net_namespace(struct net_device *dev, struct net *net, 11124 const char *pat, int new_ifindex) 11125 { 11126 struct net *net_old = dev_net(dev); 11127 int err, new_nsid; 11128 11129 ASSERT_RTNL(); 11130 11131 /* Don't allow namespace local devices to be moved. */ 11132 err = -EINVAL; 11133 if (dev->features & NETIF_F_NETNS_LOCAL) 11134 goto out; 11135 11136 /* Ensure the device has been registrered */ 11137 if (dev->reg_state != NETREG_REGISTERED) 11138 goto out; 11139 11140 /* Get out if there is nothing todo */ 11141 err = 0; 11142 if (net_eq(net_old, net)) 11143 goto out; 11144 11145 /* Pick the destination device name, and ensure 11146 * we can use it in the destination network namespace. 11147 */ 11148 err = -EEXIST; 11149 if (__dev_get_by_name(net, dev->name)) { 11150 /* We get here if we can't use the current device name */ 11151 if (!pat) 11152 goto out; 11153 err = dev_get_valid_name(net, dev, pat); 11154 if (err < 0) 11155 goto out; 11156 } 11157 11158 /* Check that new_ifindex isn't used yet. */ 11159 err = -EBUSY; 11160 if (new_ifindex && __dev_get_by_index(net, new_ifindex)) 11161 goto out; 11162 11163 /* 11164 * And now a mini version of register_netdevice unregister_netdevice. 11165 */ 11166 11167 /* If device is running close it first. */ 11168 dev_close(dev); 11169 11170 /* And unlink it from device chain */ 11171 unlist_netdevice(dev); 11172 11173 synchronize_net(); 11174 11175 /* Shutdown queueing discipline. */ 11176 dev_shutdown(dev); 11177 11178 /* Notify protocols, that we are about to destroy 11179 * this device. They should clean all the things. 11180 * 11181 * Note that dev->reg_state stays at NETREG_REGISTERED. 11182 * This is wanted because this way 8021q and macvlan know 11183 * the device is just moving and can keep their slaves up. 11184 */ 11185 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 11186 rcu_barrier(); 11187 11188 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 11189 /* If there is an ifindex conflict assign a new one */ 11190 if (!new_ifindex) { 11191 if (__dev_get_by_index(net, dev->ifindex)) 11192 new_ifindex = dev_new_index(net); 11193 else 11194 new_ifindex = dev->ifindex; 11195 } 11196 11197 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 11198 new_ifindex); 11199 11200 /* 11201 * Flush the unicast and multicast chains 11202 */ 11203 dev_uc_flush(dev); 11204 dev_mc_flush(dev); 11205 11206 /* Send a netdev-removed uevent to the old namespace */ 11207 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 11208 netdev_adjacent_del_links(dev); 11209 11210 /* Move per-net netdevice notifiers that are following the netdevice */ 11211 move_netdevice_notifiers_dev_net(dev, net); 11212 11213 /* Actually switch the network namespace */ 11214 dev_net_set(dev, net); 11215 dev->ifindex = new_ifindex; 11216 11217 /* Send a netdev-add uevent to the new namespace */ 11218 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 11219 netdev_adjacent_add_links(dev); 11220 11221 /* Fixup kobjects */ 11222 err = device_rename(&dev->dev, dev->name); 11223 WARN_ON(err); 11224 11225 /* Adapt owner in case owning user namespace of target network 11226 * namespace is different from the original one. 11227 */ 11228 err = netdev_change_owner(dev, net_old, net); 11229 WARN_ON(err); 11230 11231 /* Add the device back in the hashes */ 11232 list_netdevice(dev); 11233 11234 /* Notify protocols, that a new device appeared. */ 11235 call_netdevice_notifiers(NETDEV_REGISTER, dev); 11236 11237 /* 11238 * Prevent userspace races by waiting until the network 11239 * device is fully setup before sending notifications. 11240 */ 11241 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 11242 11243 synchronize_net(); 11244 err = 0; 11245 out: 11246 return err; 11247 } 11248 EXPORT_SYMBOL_GPL(__dev_change_net_namespace); 11249 11250 static int dev_cpu_dead(unsigned int oldcpu) 11251 { 11252 struct sk_buff **list_skb; 11253 struct sk_buff *skb; 11254 unsigned int cpu; 11255 struct softnet_data *sd, *oldsd, *remsd = NULL; 11256 11257 local_irq_disable(); 11258 cpu = smp_processor_id(); 11259 sd = &per_cpu(softnet_data, cpu); 11260 oldsd = &per_cpu(softnet_data, oldcpu); 11261 11262 /* Find end of our completion_queue. */ 11263 list_skb = &sd->completion_queue; 11264 while (*list_skb) 11265 list_skb = &(*list_skb)->next; 11266 /* Append completion queue from offline CPU. */ 11267 *list_skb = oldsd->completion_queue; 11268 oldsd->completion_queue = NULL; 11269 11270 /* Append output queue from offline CPU. */ 11271 if (oldsd->output_queue) { 11272 *sd->output_queue_tailp = oldsd->output_queue; 11273 sd->output_queue_tailp = oldsd->output_queue_tailp; 11274 oldsd->output_queue = NULL; 11275 oldsd->output_queue_tailp = &oldsd->output_queue; 11276 } 11277 /* Append NAPI poll list from offline CPU, with one exception : 11278 * process_backlog() must be called by cpu owning percpu backlog. 11279 * We properly handle process_queue & input_pkt_queue later. 11280 */ 11281 while (!list_empty(&oldsd->poll_list)) { 11282 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, 11283 struct napi_struct, 11284 poll_list); 11285 11286 list_del_init(&napi->poll_list); 11287 if (napi->poll == process_backlog) 11288 napi->state = 0; 11289 else 11290 ____napi_schedule(sd, napi); 11291 } 11292 11293 raise_softirq_irqoff(NET_TX_SOFTIRQ); 11294 local_irq_enable(); 11295 11296 #ifdef CONFIG_RPS 11297 remsd = oldsd->rps_ipi_list; 11298 oldsd->rps_ipi_list = NULL; 11299 #endif 11300 /* send out pending IPI's on offline CPU */ 11301 net_rps_send_ipi(remsd); 11302 11303 /* Process offline CPU's input_pkt_queue */ 11304 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 11305 netif_rx_ni(skb); 11306 input_queue_head_incr(oldsd); 11307 } 11308 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { 11309 netif_rx_ni(skb); 11310 input_queue_head_incr(oldsd); 11311 } 11312 11313 return 0; 11314 } 11315 11316 /** 11317 * netdev_increment_features - increment feature set by one 11318 * @all: current feature set 11319 * @one: new feature set 11320 * @mask: mask feature set 11321 * 11322 * Computes a new feature set after adding a device with feature set 11323 * @one to the master device with current feature set @all. Will not 11324 * enable anything that is off in @mask. Returns the new feature set. 11325 */ 11326 netdev_features_t netdev_increment_features(netdev_features_t all, 11327 netdev_features_t one, netdev_features_t mask) 11328 { 11329 if (mask & NETIF_F_HW_CSUM) 11330 mask |= NETIF_F_CSUM_MASK; 11331 mask |= NETIF_F_VLAN_CHALLENGED; 11332 11333 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask; 11334 all &= one | ~NETIF_F_ALL_FOR_ALL; 11335 11336 /* If one device supports hw checksumming, set for all. */ 11337 if (all & NETIF_F_HW_CSUM) 11338 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM); 11339 11340 return all; 11341 } 11342 EXPORT_SYMBOL(netdev_increment_features); 11343 11344 static struct hlist_head * __net_init netdev_create_hash(void) 11345 { 11346 int i; 11347 struct hlist_head *hash; 11348 11349 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL); 11350 if (hash != NULL) 11351 for (i = 0; i < NETDEV_HASHENTRIES; i++) 11352 INIT_HLIST_HEAD(&hash[i]); 11353 11354 return hash; 11355 } 11356 11357 /* Initialize per network namespace state */ 11358 static int __net_init netdev_init(struct net *net) 11359 { 11360 BUILD_BUG_ON(GRO_HASH_BUCKETS > 11361 8 * sizeof_field(struct napi_struct, gro_bitmask)); 11362 11363 if (net != &init_net) 11364 INIT_LIST_HEAD(&net->dev_base_head); 11365 11366 net->dev_name_head = netdev_create_hash(); 11367 if (net->dev_name_head == NULL) 11368 goto err_name; 11369 11370 net->dev_index_head = netdev_create_hash(); 11371 if (net->dev_index_head == NULL) 11372 goto err_idx; 11373 11374 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain); 11375 11376 return 0; 11377 11378 err_idx: 11379 kfree(net->dev_name_head); 11380 err_name: 11381 return -ENOMEM; 11382 } 11383 11384 /** 11385 * netdev_drivername - network driver for the device 11386 * @dev: network device 11387 * 11388 * Determine network driver for device. 11389 */ 11390 const char *netdev_drivername(const struct net_device *dev) 11391 { 11392 const struct device_driver *driver; 11393 const struct device *parent; 11394 const char *empty = ""; 11395 11396 parent = dev->dev.parent; 11397 if (!parent) 11398 return empty; 11399 11400 driver = parent->driver; 11401 if (driver && driver->name) 11402 return driver->name; 11403 return empty; 11404 } 11405 11406 static void __netdev_printk(const char *level, const struct net_device *dev, 11407 struct va_format *vaf) 11408 { 11409 if (dev && dev->dev.parent) { 11410 dev_printk_emit(level[1] - '0', 11411 dev->dev.parent, 11412 "%s %s %s%s: %pV", 11413 dev_driver_string(dev->dev.parent), 11414 dev_name(dev->dev.parent), 11415 netdev_name(dev), netdev_reg_state(dev), 11416 vaf); 11417 } else if (dev) { 11418 printk("%s%s%s: %pV", 11419 level, netdev_name(dev), netdev_reg_state(dev), vaf); 11420 } else { 11421 printk("%s(NULL net_device): %pV", level, vaf); 11422 } 11423 } 11424 11425 void netdev_printk(const char *level, const struct net_device *dev, 11426 const char *format, ...) 11427 { 11428 struct va_format vaf; 11429 va_list args; 11430 11431 va_start(args, format); 11432 11433 vaf.fmt = format; 11434 vaf.va = &args; 11435 11436 __netdev_printk(level, dev, &vaf); 11437 11438 va_end(args); 11439 } 11440 EXPORT_SYMBOL(netdev_printk); 11441 11442 #define define_netdev_printk_level(func, level) \ 11443 void func(const struct net_device *dev, const char *fmt, ...) \ 11444 { \ 11445 struct va_format vaf; \ 11446 va_list args; \ 11447 \ 11448 va_start(args, fmt); \ 11449 \ 11450 vaf.fmt = fmt; \ 11451 vaf.va = &args; \ 11452 \ 11453 __netdev_printk(level, dev, &vaf); \ 11454 \ 11455 va_end(args); \ 11456 } \ 11457 EXPORT_SYMBOL(func); 11458 11459 define_netdev_printk_level(netdev_emerg, KERN_EMERG); 11460 define_netdev_printk_level(netdev_alert, KERN_ALERT); 11461 define_netdev_printk_level(netdev_crit, KERN_CRIT); 11462 define_netdev_printk_level(netdev_err, KERN_ERR); 11463 define_netdev_printk_level(netdev_warn, KERN_WARNING); 11464 define_netdev_printk_level(netdev_notice, KERN_NOTICE); 11465 define_netdev_printk_level(netdev_info, KERN_INFO); 11466 11467 static void __net_exit netdev_exit(struct net *net) 11468 { 11469 kfree(net->dev_name_head); 11470 kfree(net->dev_index_head); 11471 if (net != &init_net) 11472 WARN_ON_ONCE(!list_empty(&net->dev_base_head)); 11473 } 11474 11475 static struct pernet_operations __net_initdata netdev_net_ops = { 11476 .init = netdev_init, 11477 .exit = netdev_exit, 11478 }; 11479 11480 static void __net_exit default_device_exit(struct net *net) 11481 { 11482 struct net_device *dev, *aux; 11483 /* 11484 * Push all migratable network devices back to the 11485 * initial network namespace 11486 */ 11487 rtnl_lock(); 11488 for_each_netdev_safe(net, dev, aux) { 11489 int err; 11490 char fb_name[IFNAMSIZ]; 11491 11492 /* Ignore unmoveable devices (i.e. loopback) */ 11493 if (dev->features & NETIF_F_NETNS_LOCAL) 11494 continue; 11495 11496 /* Leave virtual devices for the generic cleanup */ 11497 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 11498 continue; 11499 11500 /* Push remaining network devices to init_net */ 11501 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11502 if (__dev_get_by_name(&init_net, fb_name)) 11503 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11504 err = dev_change_net_namespace(dev, &init_net, fb_name); 11505 if (err) { 11506 pr_emerg("%s: failed to move %s to init_net: %d\n", 11507 __func__, dev->name, err); 11508 BUG(); 11509 } 11510 } 11511 rtnl_unlock(); 11512 } 11513 11514 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list) 11515 { 11516 /* Return with the rtnl_lock held when there are no network 11517 * devices unregistering in any network namespace in net_list. 11518 */ 11519 struct net *net; 11520 bool unregistering; 11521 DEFINE_WAIT_FUNC(wait, woken_wake_function); 11522 11523 add_wait_queue(&netdev_unregistering_wq, &wait); 11524 for (;;) { 11525 unregistering = false; 11526 rtnl_lock(); 11527 list_for_each_entry(net, net_list, exit_list) { 11528 if (net->dev_unreg_count > 0) { 11529 unregistering = true; 11530 break; 11531 } 11532 } 11533 if (!unregistering) 11534 break; 11535 __rtnl_unlock(); 11536 11537 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 11538 } 11539 remove_wait_queue(&netdev_unregistering_wq, &wait); 11540 } 11541 11542 static void __net_exit default_device_exit_batch(struct list_head *net_list) 11543 { 11544 /* At exit all network devices most be removed from a network 11545 * namespace. Do this in the reverse order of registration. 11546 * Do this across as many network namespaces as possible to 11547 * improve batching efficiency. 11548 */ 11549 struct net_device *dev; 11550 struct net *net; 11551 LIST_HEAD(dev_kill_list); 11552 11553 /* To prevent network device cleanup code from dereferencing 11554 * loopback devices or network devices that have been freed 11555 * wait here for all pending unregistrations to complete, 11556 * before unregistring the loopback device and allowing the 11557 * network namespace be freed. 11558 * 11559 * The netdev todo list containing all network devices 11560 * unregistrations that happen in default_device_exit_batch 11561 * will run in the rtnl_unlock() at the end of 11562 * default_device_exit_batch. 11563 */ 11564 rtnl_lock_unregistering(net_list); 11565 list_for_each_entry(net, net_list, exit_list) { 11566 for_each_netdev_reverse(net, dev) { 11567 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) 11568 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 11569 else 11570 unregister_netdevice_queue(dev, &dev_kill_list); 11571 } 11572 } 11573 unregister_netdevice_many(&dev_kill_list); 11574 rtnl_unlock(); 11575 } 11576 11577 static struct pernet_operations __net_initdata default_device_ops = { 11578 .exit = default_device_exit, 11579 .exit_batch = default_device_exit_batch, 11580 }; 11581 11582 /* 11583 * Initialize the DEV module. At boot time this walks the device list and 11584 * unhooks any devices that fail to initialise (normally hardware not 11585 * present) and leaves us with a valid list of present and active devices. 11586 * 11587 */ 11588 11589 /* 11590 * This is called single threaded during boot, so no need 11591 * to take the rtnl semaphore. 11592 */ 11593 static int __init net_dev_init(void) 11594 { 11595 int i, rc = -ENOMEM; 11596 11597 BUG_ON(!dev_boot_phase); 11598 11599 if (dev_proc_init()) 11600 goto out; 11601 11602 if (netdev_kobject_init()) 11603 goto out; 11604 11605 INIT_LIST_HEAD(&ptype_all); 11606 for (i = 0; i < PTYPE_HASH_SIZE; i++) 11607 INIT_LIST_HEAD(&ptype_base[i]); 11608 11609 INIT_LIST_HEAD(&offload_base); 11610 11611 if (register_pernet_subsys(&netdev_net_ops)) 11612 goto out; 11613 11614 /* 11615 * Initialise the packet receive queues. 11616 */ 11617 11618 for_each_possible_cpu(i) { 11619 struct work_struct *flush = per_cpu_ptr(&flush_works, i); 11620 struct softnet_data *sd = &per_cpu(softnet_data, i); 11621 11622 INIT_WORK(flush, flush_backlog); 11623 11624 skb_queue_head_init(&sd->input_pkt_queue); 11625 skb_queue_head_init(&sd->process_queue); 11626 #ifdef CONFIG_XFRM_OFFLOAD 11627 skb_queue_head_init(&sd->xfrm_backlog); 11628 #endif 11629 INIT_LIST_HEAD(&sd->poll_list); 11630 sd->output_queue_tailp = &sd->output_queue; 11631 #ifdef CONFIG_RPS 11632 INIT_CSD(&sd->csd, rps_trigger_softirq, sd); 11633 sd->cpu = i; 11634 #endif 11635 11636 init_gro_hash(&sd->backlog); 11637 sd->backlog.poll = process_backlog; 11638 sd->backlog.weight = weight_p; 11639 } 11640 11641 dev_boot_phase = 0; 11642 11643 /* The loopback device is special if any other network devices 11644 * is present in a network namespace the loopback device must 11645 * be present. Since we now dynamically allocate and free the 11646 * loopback device ensure this invariant is maintained by 11647 * keeping the loopback device as the first device on the 11648 * list of network devices. Ensuring the loopback devices 11649 * is the first device that appears and the last network device 11650 * that disappears. 11651 */ 11652 if (register_pernet_device(&loopback_net_ops)) 11653 goto out; 11654 11655 if (register_pernet_device(&default_device_ops)) 11656 goto out; 11657 11658 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 11659 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 11660 11661 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead", 11662 NULL, dev_cpu_dead); 11663 WARN_ON(rc < 0); 11664 rc = 0; 11665 out: 11666 return rc; 11667 } 11668 11669 subsys_initcall(net_dev_init); 11670