1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NET3 Protocol independent device support routines. 4 * 5 * Derived from the non IP parts of dev.c 1.0.19 6 * Authors: Ross Biro 7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 8 * Mark Evans, <evansmp@uhura.aston.ac.uk> 9 * 10 * Additional Authors: 11 * Florian la Roche <rzsfl@rz.uni-sb.de> 12 * Alan Cox <gw4pts@gw4pts.ampr.org> 13 * David Hinds <dahinds@users.sourceforge.net> 14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 15 * Adam Sulmicki <adam@cfar.umd.edu> 16 * Pekka Riikonen <priikone@poesidon.pspt.fi> 17 * 18 * Changes: 19 * D.J. Barrow : Fixed bug where dev->refcnt gets set 20 * to 2 if register_netdev gets called 21 * before net_dev_init & also removed a 22 * few lines of code in the process. 23 * Alan Cox : device private ioctl copies fields back. 24 * Alan Cox : Transmit queue code does relevant 25 * stunts to keep the queue safe. 26 * Alan Cox : Fixed double lock. 27 * Alan Cox : Fixed promisc NULL pointer trap 28 * ???????? : Support the full private ioctl range 29 * Alan Cox : Moved ioctl permission check into 30 * drivers 31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI 32 * Alan Cox : 100 backlog just doesn't cut it when 33 * you start doing multicast video 8) 34 * Alan Cox : Rewrote net_bh and list manager. 35 * Alan Cox : Fix ETH_P_ALL echoback lengths. 36 * Alan Cox : Took out transmit every packet pass 37 * Saved a few bytes in the ioctl handler 38 * Alan Cox : Network driver sets packet type before 39 * calling netif_rx. Saves a function 40 * call a packet. 41 * Alan Cox : Hashed net_bh() 42 * Richard Kooijman: Timestamp fixes. 43 * Alan Cox : Wrong field in SIOCGIFDSTADDR 44 * Alan Cox : Device lock protection. 45 * Alan Cox : Fixed nasty side effect of device close 46 * changes. 47 * Rudi Cilibrasi : Pass the right thing to 48 * set_mac_address() 49 * Dave Miller : 32bit quantity for the device lock to 50 * make it work out on a Sparc. 51 * Bjorn Ekwall : Added KERNELD hack. 52 * Alan Cox : Cleaned up the backlog initialise. 53 * Craig Metz : SIOCGIFCONF fix if space for under 54 * 1 device. 55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there 56 * is no device open function. 57 * Andi Kleen : Fix error reporting for SIOCGIFCONF 58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF 59 * Cyrus Durgin : Cleaned for KMOD 60 * Adam Sulmicki : Bug Fix : Network Device Unload 61 * A network device unload needs to purge 62 * the backlog queue. 
63 * Paul Rusty Russell : SIOCSIFNAME 64 * Pekka Riikonen : Netdev boot-time settings code 65 * Andrew Morton : Make unregister_netdevice wait 66 * indefinitely on dev->refcnt 67 * J Hadi Salim : - Backlog queue sampling 68 * - netif_rx() feedback 69 */ 70 71 #include <linux/uaccess.h> 72 #include <linux/bitops.h> 73 #include <linux/capability.h> 74 #include <linux/cpu.h> 75 #include <linux/types.h> 76 #include <linux/kernel.h> 77 #include <linux/hash.h> 78 #include <linux/slab.h> 79 #include <linux/sched.h> 80 #include <linux/sched/mm.h> 81 #include <linux/mutex.h> 82 #include <linux/string.h> 83 #include <linux/mm.h> 84 #include <linux/socket.h> 85 #include <linux/sockios.h> 86 #include <linux/errno.h> 87 #include <linux/interrupt.h> 88 #include <linux/if_ether.h> 89 #include <linux/netdevice.h> 90 #include <linux/etherdevice.h> 91 #include <linux/ethtool.h> 92 #include <linux/skbuff.h> 93 #include <linux/bpf.h> 94 #include <linux/bpf_trace.h> 95 #include <net/net_namespace.h> 96 #include <net/sock.h> 97 #include <net/busy_poll.h> 98 #include <linux/rtnetlink.h> 99 #include <linux/stat.h> 100 #include <net/dst.h> 101 #include <net/dst_metadata.h> 102 #include <net/pkt_sched.h> 103 #include <net/pkt_cls.h> 104 #include <net/checksum.h> 105 #include <net/xfrm.h> 106 #include <linux/highmem.h> 107 #include <linux/init.h> 108 #include <linux/module.h> 109 #include <linux/netpoll.h> 110 #include <linux/rcupdate.h> 111 #include <linux/delay.h> 112 #include <net/iw_handler.h> 113 #include <asm/current.h> 114 #include <linux/audit.h> 115 #include <linux/dmaengine.h> 116 #include <linux/err.h> 117 #include <linux/ctype.h> 118 #include <linux/if_arp.h> 119 #include <linux/if_vlan.h> 120 #include <linux/ip.h> 121 #include <net/ip.h> 122 #include <net/mpls.h> 123 #include <linux/ipv6.h> 124 #include <linux/in.h> 125 #include <linux/jhash.h> 126 #include <linux/random.h> 127 #include <trace/events/napi.h> 128 #include <trace/events/net.h> 129 #include <trace/events/skb.h> 130 #include <linux/inetdevice.h> 131 #include <linux/cpu_rmap.h> 132 #include <linux/static_key.h> 133 #include <linux/hashtable.h> 134 #include <linux/vmalloc.h> 135 #include <linux/if_macvlan.h> 136 #include <linux/errqueue.h> 137 #include <linux/hrtimer.h> 138 #include <linux/netfilter_ingress.h> 139 #include <linux/crash_dump.h> 140 #include <linux/sctp.h> 141 #include <net/udp_tunnel.h> 142 #include <linux/net_namespace.h> 143 #include <linux/indirect_call_wrapper.h> 144 #include <net/devlink.h> 145 146 #include "net-sysfs.h" 147 148 #define MAX_GRO_SKBS 8 149 150 /* This should be increased if a protocol with a bigger head is added. */ 151 #define GRO_MAX_HEAD (MAX_HEADER + 128) 152 153 static DEFINE_SPINLOCK(ptype_lock); 154 static DEFINE_SPINLOCK(offload_lock); 155 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 156 struct list_head ptype_all __read_mostly; /* Taps */ 157 static struct list_head offload_base __read_mostly; 158 159 static int netif_rx_internal(struct sk_buff *skb); 160 static int call_netdevice_notifiers_info(unsigned long val, 161 struct netdev_notifier_info *info); 162 static int call_netdevice_notifiers_extack(unsigned long val, 163 struct net_device *dev, 164 struct netlink_ext_ack *extack); 165 static struct napi_struct *napi_by_id(unsigned int napi_id); 166 167 /* 168 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 169 * semaphore. 
170 * 171 * Pure readers hold dev_base_lock for reading, or rcu_read_lock() 172 * 173 * Writers must hold the rtnl semaphore while they loop through the 174 * dev_base_head list, and hold dev_base_lock for writing when they do the 175 * actual updates. This allows pure readers to access the list even 176 * while a writer is preparing to update it. 177 * 178 * To put it another way, dev_base_lock is held for writing only to 179 * protect against pure readers; the rtnl semaphore provides the 180 * protection against other writers. 181 * 182 * See, for example usages, register_netdevice() and 183 * unregister_netdevice(), which must be called with the rtnl 184 * semaphore held. 185 */ 186 DEFINE_RWLOCK(dev_base_lock); 187 EXPORT_SYMBOL(dev_base_lock); 188 189 static DEFINE_MUTEX(ifalias_mutex); 190 191 /* protects napi_hash addition/deletion and napi_gen_id */ 192 static DEFINE_SPINLOCK(napi_hash_lock); 193 194 static unsigned int napi_gen_id = NR_CPUS; 195 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); 196 197 static seqcount_t devnet_rename_seq; 198 199 static inline void dev_base_seq_inc(struct net *net) 200 { 201 while (++net->dev_base_seq == 0) 202 ; 203 } 204 205 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 206 { 207 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); 208 209 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 210 } 211 212 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) 213 { 214 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; 215 } 216 217 static inline void rps_lock(struct softnet_data *sd) 218 { 219 #ifdef CONFIG_RPS 220 spin_lock(&sd->input_pkt_queue.lock); 221 #endif 222 } 223 224 static inline void rps_unlock(struct softnet_data *sd) 225 { 226 #ifdef CONFIG_RPS 227 spin_unlock(&sd->input_pkt_queue.lock); 228 #endif 229 } 230 231 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev, 232 const char *name) 233 { 234 struct netdev_name_node *name_node; 235 236 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL); 237 if (!name_node) 238 return NULL; 239 INIT_HLIST_NODE(&name_node->hlist); 240 name_node->dev = dev; 241 name_node->name = name; 242 return name_node; 243 } 244 245 static struct netdev_name_node * 246 netdev_name_node_head_alloc(struct net_device *dev) 247 { 248 struct netdev_name_node *name_node; 249 250 name_node = netdev_name_node_alloc(dev, dev->name); 251 if (!name_node) 252 return NULL; 253 INIT_LIST_HEAD(&name_node->list); 254 return name_node; 255 } 256 257 static void netdev_name_node_free(struct netdev_name_node *name_node) 258 { 259 kfree(name_node); 260 } 261 262 static void netdev_name_node_add(struct net *net, 263 struct netdev_name_node *name_node) 264 { 265 hlist_add_head_rcu(&name_node->hlist, 266 dev_name_hash(net, name_node->name)); 267 } 268 269 static void netdev_name_node_del(struct netdev_name_node *name_node) 270 { 271 hlist_del_rcu(&name_node->hlist); 272 } 273 274 static struct netdev_name_node *netdev_name_node_lookup(struct net *net, 275 const char *name) 276 { 277 struct hlist_head *head = dev_name_hash(net, name); 278 struct netdev_name_node *name_node; 279 280 hlist_for_each_entry(name_node, head, hlist) 281 if (!strcmp(name_node->name, name)) 282 return name_node; 283 return NULL; 284 } 285 286 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net, 287 const char *name) 288 { 289 struct hlist_head *head = dev_name_hash(net, name); 290 struct netdev_name_node 
*name_node; 291 292 hlist_for_each_entry_rcu(name_node, head, hlist) 293 if (!strcmp(name_node->name, name)) 294 return name_node; 295 return NULL; 296 } 297 298 int netdev_name_node_alt_create(struct net_device *dev, const char *name) 299 { 300 struct netdev_name_node *name_node; 301 struct net *net = dev_net(dev); 302 303 name_node = netdev_name_node_lookup(net, name); 304 if (name_node) 305 return -EEXIST; 306 name_node = netdev_name_node_alloc(dev, name); 307 if (!name_node) 308 return -ENOMEM; 309 netdev_name_node_add(net, name_node); 310 /* The node that holds dev->name acts as a head of per-device list. */ 311 list_add_tail(&name_node->list, &dev->name_node->list); 312 313 return 0; 314 } 315 EXPORT_SYMBOL(netdev_name_node_alt_create); 316 317 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node) 318 { 319 list_del(&name_node->list); 320 netdev_name_node_del(name_node); 321 kfree(name_node->name); 322 netdev_name_node_free(name_node); 323 } 324 325 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) 326 { 327 struct netdev_name_node *name_node; 328 struct net *net = dev_net(dev); 329 330 name_node = netdev_name_node_lookup(net, name); 331 if (!name_node) 332 return -ENOENT; 333 /* lookup might have found our primary name or a name belonging 334 * to another device. 335 */ 336 if (name_node == dev->name_node || name_node->dev != dev) 337 return -EINVAL; 338 339 __netdev_name_node_alt_destroy(name_node); 340 341 return 0; 342 } 343 EXPORT_SYMBOL(netdev_name_node_alt_destroy); 344 345 static void netdev_name_node_alt_flush(struct net_device *dev) 346 { 347 struct netdev_name_node *name_node, *tmp; 348 349 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) 350 __netdev_name_node_alt_destroy(name_node); 351 } 352 353 /* Device list insertion */ 354 static void list_netdevice(struct net_device *dev) 355 { 356 struct net *net = dev_net(dev); 357 358 ASSERT_RTNL(); 359 360 write_lock_bh(&dev_base_lock); 361 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); 362 netdev_name_node_add(net, dev->name_node); 363 hlist_add_head_rcu(&dev->index_hlist, 364 dev_index_hash(net, dev->ifindex)); 365 write_unlock_bh(&dev_base_lock); 366 367 dev_base_seq_inc(net); 368 } 369 370 /* Device list removal 371 * caller must respect a RCU grace period before freeing/reusing dev 372 */ 373 static void unlist_netdevice(struct net_device *dev) 374 { 375 ASSERT_RTNL(); 376 377 /* Unlink dev from the device chain */ 378 write_lock_bh(&dev_base_lock); 379 list_del_rcu(&dev->dev_list); 380 netdev_name_node_del(dev->name_node); 381 hlist_del_rcu(&dev->index_hlist); 382 write_unlock_bh(&dev_base_lock); 383 384 dev_base_seq_inc(dev_net(dev)); 385 } 386 387 /* 388 * Our notifier list 389 */ 390 391 static RAW_NOTIFIER_HEAD(netdev_chain); 392 393 /* 394 * Device drivers call our routines to queue packets here. We empty the 395 * queue in the local softnet handler. 396 */ 397 398 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 399 EXPORT_PER_CPU_SYMBOL(softnet_data); 400 401 /******************************************************************************* 402 * 403 * Protocol management and registration routines 404 * 405 *******************************************************************************/ 406 407 408 /* 409 * Add a protocol ID to the list. Now that the input handler is 410 * smarter we can dispense with all the messy stuff that used to be 411 * here. 412 * 413 * BEWARE!!! 
Protocol handlers, mangling input packets, 414 * MUST BE last in hash buckets and checking protocol handlers 415 * MUST start from promiscuous ptype_all chain in net_bh. 416 * It is true now, do not change it. 417 * Explanation follows: if protocol handler, mangling packet, will 418 * be the first on list, it is not able to sense, that packet 419 * is cloned and should be copied-on-write, so that it will 420 * change it and subsequent readers will get broken packet. 421 * --ANK (980803) 422 */ 423 424 static inline struct list_head *ptype_head(const struct packet_type *pt) 425 { 426 if (pt->type == htons(ETH_P_ALL)) 427 return pt->dev ? &pt->dev->ptype_all : &ptype_all; 428 else 429 return pt->dev ? &pt->dev->ptype_specific : 430 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; 431 } 432 433 /** 434 * dev_add_pack - add packet handler 435 * @pt: packet type declaration 436 * 437 * Add a protocol handler to the networking stack. The passed &packet_type 438 * is linked into kernel lists and may not be freed until it has been 439 * removed from the kernel lists. 440 * 441 * This call does not sleep therefore it can not 442 * guarantee all CPU's that are in middle of receiving packets 443 * will see the new packet type (until the next received packet). 444 */ 445 446 void dev_add_pack(struct packet_type *pt) 447 { 448 struct list_head *head = ptype_head(pt); 449 450 spin_lock(&ptype_lock); 451 list_add_rcu(&pt->list, head); 452 spin_unlock(&ptype_lock); 453 } 454 EXPORT_SYMBOL(dev_add_pack); 455 456 /** 457 * __dev_remove_pack - remove packet handler 458 * @pt: packet type declaration 459 * 460 * Remove a protocol handler that was previously added to the kernel 461 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 462 * from the kernel lists and can be freed or reused once this function 463 * returns. 464 * 465 * The packet type might still be in use by receivers 466 * and must not be freed until after all the CPU's have gone 467 * through a quiescent state. 468 */ 469 void __dev_remove_pack(struct packet_type *pt) 470 { 471 struct list_head *head = ptype_head(pt); 472 struct packet_type *pt1; 473 474 spin_lock(&ptype_lock); 475 476 list_for_each_entry(pt1, head, list) { 477 if (pt == pt1) { 478 list_del_rcu(&pt->list); 479 goto out; 480 } 481 } 482 483 pr_warn("dev_remove_pack: %p not found\n", pt); 484 out: 485 spin_unlock(&ptype_lock); 486 } 487 EXPORT_SYMBOL(__dev_remove_pack); 488 489 /** 490 * dev_remove_pack - remove packet handler 491 * @pt: packet type declaration 492 * 493 * Remove a protocol handler that was previously added to the kernel 494 * protocol handlers by dev_add_pack(). The passed &packet_type is removed 495 * from the kernel lists and can be freed or reused once this function 496 * returns. 497 * 498 * This call sleeps to guarantee that no CPU is looking at the packet 499 * type after return. 500 */ 501 void dev_remove_pack(struct packet_type *pt) 502 { 503 __dev_remove_pack(pt); 504 505 synchronize_net(); 506 } 507 EXPORT_SYMBOL(dev_remove_pack); 508 509 510 /** 511 * dev_add_offload - register offload handlers 512 * @po: protocol offload declaration 513 * 514 * Add protocol offload handlers to the networking stack. The passed 515 * &proto_offload is linked into kernel lists and may not be freed until 516 * it has been removed from the kernel lists. 517 * 518 * This call does not sleep therefore it can not 519 * guarantee all CPU's that are in middle of receiving packets 520 * will see the new offload handlers (until the next received packet). 
521 */ 522 void dev_add_offload(struct packet_offload *po) 523 { 524 struct packet_offload *elem; 525 526 spin_lock(&offload_lock); 527 list_for_each_entry(elem, &offload_base, list) { 528 if (po->priority < elem->priority) 529 break; 530 } 531 list_add_rcu(&po->list, elem->list.prev); 532 spin_unlock(&offload_lock); 533 } 534 EXPORT_SYMBOL(dev_add_offload); 535 536 /** 537 * __dev_remove_offload - remove offload handler 538 * @po: packet offload declaration 539 * 540 * Remove a protocol offload handler that was previously added to the 541 * kernel offload handlers by dev_add_offload(). The passed &offload_type 542 * is removed from the kernel lists and can be freed or reused once this 543 * function returns. 544 * 545 * The packet type might still be in use by receivers 546 * and must not be freed until after all the CPU's have gone 547 * through a quiescent state. 548 */ 549 static void __dev_remove_offload(struct packet_offload *po) 550 { 551 struct list_head *head = &offload_base; 552 struct packet_offload *po1; 553 554 spin_lock(&offload_lock); 555 556 list_for_each_entry(po1, head, list) { 557 if (po == po1) { 558 list_del_rcu(&po->list); 559 goto out; 560 } 561 } 562 563 pr_warn("dev_remove_offload: %p not found\n", po); 564 out: 565 spin_unlock(&offload_lock); 566 } 567 568 /** 569 * dev_remove_offload - remove packet offload handler 570 * @po: packet offload declaration 571 * 572 * Remove a packet offload handler that was previously added to the kernel 573 * offload handlers by dev_add_offload(). The passed &offload_type is 574 * removed from the kernel lists and can be freed or reused once this 575 * function returns. 576 * 577 * This call sleeps to guarantee that no CPU is looking at the packet 578 * type after return. 579 */ 580 void dev_remove_offload(struct packet_offload *po) 581 { 582 __dev_remove_offload(po); 583 584 synchronize_net(); 585 } 586 EXPORT_SYMBOL(dev_remove_offload); 587 588 /****************************************************************************** 589 * 590 * Device Boot-time Settings Routines 591 * 592 ******************************************************************************/ 593 594 /* Boot time configuration table */ 595 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; 596 597 /** 598 * netdev_boot_setup_add - add new setup entry 599 * @name: name of the device 600 * @map: configured settings for the device 601 * 602 * Adds new setup entry to the dev_boot_setup list. The function 603 * returns 0 on error and 1 on success. This is a generic routine to 604 * all netdevices. 605 */ 606 static int netdev_boot_setup_add(char *name, struct ifmap *map) 607 { 608 struct netdev_boot_setup *s; 609 int i; 610 611 s = dev_boot_setup; 612 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 613 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { 614 memset(s[i].name, 0, sizeof(s[i].name)); 615 strlcpy(s[i].name, name, IFNAMSIZ); 616 memcpy(&s[i].map, map, sizeof(s[i].map)); 617 break; 618 } 619 } 620 621 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; 622 } 623 624 /** 625 * netdev_boot_setup_check - check boot time settings 626 * @dev: the netdevice 627 * 628 * Check boot time settings for the device. 629 * The found settings are set for the device to be used 630 * later in the device probing. 631 * Returns 0 if no settings found, 1 if they are. 
632 */ 633 int netdev_boot_setup_check(struct net_device *dev) 634 { 635 struct netdev_boot_setup *s = dev_boot_setup; 636 int i; 637 638 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { 639 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && 640 !strcmp(dev->name, s[i].name)) { 641 dev->irq = s[i].map.irq; 642 dev->base_addr = s[i].map.base_addr; 643 dev->mem_start = s[i].map.mem_start; 644 dev->mem_end = s[i].map.mem_end; 645 return 1; 646 } 647 } 648 return 0; 649 } 650 EXPORT_SYMBOL(netdev_boot_setup_check); 651 652 653 /** 654 * netdev_boot_base - get address from boot time settings 655 * @prefix: prefix for network device 656 * @unit: id for network device 657 * 658 * Check boot time settings for the base address of device. 659 * The found settings are set for the device to be used 660 * later in the device probing. 661 * Returns 0 if no settings found. 662 */ 663 unsigned long netdev_boot_base(const char *prefix, int unit) 664 { 665 const struct netdev_boot_setup *s = dev_boot_setup; 666 char name[IFNAMSIZ]; 667 int i; 668 669 sprintf(name, "%s%d", prefix, unit); 670 671 /* 672 * If device already registered then return base of 1 673 * to indicate not to probe for this interface 674 */ 675 if (__dev_get_by_name(&init_net, name)) 676 return 1; 677 678 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) 679 if (!strcmp(name, s[i].name)) 680 return s[i].map.base_addr; 681 return 0; 682 } 683 684 /* 685 * Saves at boot time configured settings for any netdevice. 686 */ 687 int __init netdev_boot_setup(char *str) 688 { 689 int ints[5]; 690 struct ifmap map; 691 692 str = get_options(str, ARRAY_SIZE(ints), ints); 693 if (!str || !*str) 694 return 0; 695 696 /* Save settings */ 697 memset(&map, 0, sizeof(map)); 698 if (ints[0] > 0) 699 map.irq = ints[1]; 700 if (ints[0] > 1) 701 map.base_addr = ints[2]; 702 if (ints[0] > 2) 703 map.mem_start = ints[3]; 704 if (ints[0] > 3) 705 map.mem_end = ints[4]; 706 707 /* Add new entry to the list */ 708 return netdev_boot_setup_add(str, &map); 709 } 710 711 __setup("netdev=", netdev_boot_setup); 712 713 /******************************************************************************* 714 * 715 * Device Interface Subroutines 716 * 717 *******************************************************************************/ 718 719 /** 720 * dev_get_iflink - get 'iflink' value of a interface 721 * @dev: targeted interface 722 * 723 * Indicates the ifindex the interface is linked to. 724 * Physical interfaces have the same 'ifindex' and 'iflink' values. 725 */ 726 727 int dev_get_iflink(const struct net_device *dev) 728 { 729 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 730 return dev->netdev_ops->ndo_get_iflink(dev); 731 732 return dev->ifindex; 733 } 734 EXPORT_SYMBOL(dev_get_iflink); 735 736 /** 737 * dev_fill_metadata_dst - Retrieve tunnel egress information. 738 * @dev: targeted interface 739 * @skb: The packet. 740 * 741 * For better visibility of tunnel traffic OVS needs to retrieve 742 * egress tunnel information for a packet. Following API allows 743 * user to get this info. 
744 */ 745 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 746 { 747 struct ip_tunnel_info *info; 748 749 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) 750 return -EINVAL; 751 752 info = skb_tunnel_info_unclone(skb); 753 if (!info) 754 return -ENOMEM; 755 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) 756 return -EINVAL; 757 758 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); 759 } 760 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); 761 762 /** 763 * __dev_get_by_name - find a device by its name 764 * @net: the applicable net namespace 765 * @name: name to find 766 * 767 * Find an interface by name. Must be called under RTNL semaphore 768 * or @dev_base_lock. If the name is found a pointer to the device 769 * is returned. If the name is not found then %NULL is returned. The 770 * reference counters are not incremented so the caller must be 771 * careful with locks. 772 */ 773 774 struct net_device *__dev_get_by_name(struct net *net, const char *name) 775 { 776 struct netdev_name_node *node_name; 777 778 node_name = netdev_name_node_lookup(net, name); 779 return node_name ? node_name->dev : NULL; 780 } 781 EXPORT_SYMBOL(__dev_get_by_name); 782 783 /** 784 * dev_get_by_name_rcu - find a device by its name 785 * @net: the applicable net namespace 786 * @name: name to find 787 * 788 * Find an interface by name. 789 * If the name is found a pointer to the device is returned. 790 * If the name is not found then %NULL is returned. 791 * The reference counters are not incremented so the caller must be 792 * careful with locks. The caller must hold RCU lock. 793 */ 794 795 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) 796 { 797 struct netdev_name_node *node_name; 798 799 node_name = netdev_name_node_lookup_rcu(net, name); 800 return node_name ? node_name->dev : NULL; 801 } 802 EXPORT_SYMBOL(dev_get_by_name_rcu); 803 804 /** 805 * dev_get_by_name - find a device by its name 806 * @net: the applicable net namespace 807 * @name: name to find 808 * 809 * Find an interface by name. This can be called from any 810 * context and does its own locking. The returned handle has 811 * the usage count incremented and the caller must use dev_put() to 812 * release it when it is no longer needed. %NULL is returned if no 813 * matching device is found. 814 */ 815 816 struct net_device *dev_get_by_name(struct net *net, const char *name) 817 { 818 struct net_device *dev; 819 820 rcu_read_lock(); 821 dev = dev_get_by_name_rcu(net, name); 822 if (dev) 823 dev_hold(dev); 824 rcu_read_unlock(); 825 return dev; 826 } 827 EXPORT_SYMBOL(dev_get_by_name); 828 829 /** 830 * __dev_get_by_index - find a device by its ifindex 831 * @net: the applicable net namespace 832 * @ifindex: index of device 833 * 834 * Search for an interface by index. Returns %NULL if the device 835 * is not found or a pointer to the device. The device has not 836 * had its reference counter increased so the caller must be careful 837 * about locking. The caller must hold either the RTNL semaphore 838 * or @dev_base_lock. 
839 */ 840 841 struct net_device *__dev_get_by_index(struct net *net, int ifindex) 842 { 843 struct net_device *dev; 844 struct hlist_head *head = dev_index_hash(net, ifindex); 845 846 hlist_for_each_entry(dev, head, index_hlist) 847 if (dev->ifindex == ifindex) 848 return dev; 849 850 return NULL; 851 } 852 EXPORT_SYMBOL(__dev_get_by_index); 853 854 /** 855 * dev_get_by_index_rcu - find a device by its ifindex 856 * @net: the applicable net namespace 857 * @ifindex: index of device 858 * 859 * Search for an interface by index. Returns %NULL if the device 860 * is not found or a pointer to the device. The device has not 861 * had its reference counter increased so the caller must be careful 862 * about locking. The caller must hold RCU lock. 863 */ 864 865 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) 866 { 867 struct net_device *dev; 868 struct hlist_head *head = dev_index_hash(net, ifindex); 869 870 hlist_for_each_entry_rcu(dev, head, index_hlist) 871 if (dev->ifindex == ifindex) 872 return dev; 873 874 return NULL; 875 } 876 EXPORT_SYMBOL(dev_get_by_index_rcu); 877 878 879 /** 880 * dev_get_by_index - find a device by its ifindex 881 * @net: the applicable net namespace 882 * @ifindex: index of device 883 * 884 * Search for an interface by index. Returns NULL if the device 885 * is not found or a pointer to the device. The device returned has 886 * had a reference added and the pointer is safe until the user calls 887 * dev_put to indicate they have finished with it. 888 */ 889 890 struct net_device *dev_get_by_index(struct net *net, int ifindex) 891 { 892 struct net_device *dev; 893 894 rcu_read_lock(); 895 dev = dev_get_by_index_rcu(net, ifindex); 896 if (dev) 897 dev_hold(dev); 898 rcu_read_unlock(); 899 return dev; 900 } 901 EXPORT_SYMBOL(dev_get_by_index); 902 903 /** 904 * dev_get_by_napi_id - find a device by napi_id 905 * @napi_id: ID of the NAPI struct 906 * 907 * Search for an interface by NAPI ID. Returns %NULL if the device 908 * is not found or a pointer to the device. The device has not had 909 * its reference counter increased so the caller must be careful 910 * about locking. The caller must hold RCU lock. 911 */ 912 913 struct net_device *dev_get_by_napi_id(unsigned int napi_id) 914 { 915 struct napi_struct *napi; 916 917 WARN_ON_ONCE(!rcu_read_lock_held()); 918 919 if (napi_id < MIN_NAPI_ID) 920 return NULL; 921 922 napi = napi_by_id(napi_id); 923 924 return napi ? napi->dev : NULL; 925 } 926 EXPORT_SYMBOL(dev_get_by_napi_id); 927 928 /** 929 * netdev_get_name - get a netdevice name, knowing its ifindex. 930 * @net: network namespace 931 * @name: a pointer to the buffer where the name will be stored. 932 * @ifindex: the ifindex of the interface to get the name from. 933 * 934 * The use of raw_seqcount_begin() and cond_resched() before 935 * retrying is required as we want to give the writers a chance 936 * to complete when CONFIG_PREEMPTION is not set. 
937 */ 938 int netdev_get_name(struct net *net, char *name, int ifindex) 939 { 940 struct net_device *dev; 941 unsigned int seq; 942 943 retry: 944 seq = raw_seqcount_begin(&devnet_rename_seq); 945 rcu_read_lock(); 946 dev = dev_get_by_index_rcu(net, ifindex); 947 if (!dev) { 948 rcu_read_unlock(); 949 return -ENODEV; 950 } 951 952 strcpy(name, dev->name); 953 rcu_read_unlock(); 954 if (read_seqcount_retry(&devnet_rename_seq, seq)) { 955 cond_resched(); 956 goto retry; 957 } 958 959 return 0; 960 } 961 962 /** 963 * dev_getbyhwaddr_rcu - find a device by its hardware address 964 * @net: the applicable net namespace 965 * @type: media type of device 966 * @ha: hardware address 967 * 968 * Search for an interface by MAC address. Returns NULL if the device 969 * is not found or a pointer to the device. 970 * The caller must hold RCU or RTNL. 971 * The returned device has not had its ref count increased 972 * and the caller must therefore be careful about locking 973 * 974 */ 975 976 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 977 const char *ha) 978 { 979 struct net_device *dev; 980 981 for_each_netdev_rcu(net, dev) 982 if (dev->type == type && 983 !memcmp(dev->dev_addr, ha, dev->addr_len)) 984 return dev; 985 986 return NULL; 987 } 988 EXPORT_SYMBOL(dev_getbyhwaddr_rcu); 989 990 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 991 { 992 struct net_device *dev; 993 994 ASSERT_RTNL(); 995 for_each_netdev(net, dev) 996 if (dev->type == type) 997 return dev; 998 999 return NULL; 1000 } 1001 EXPORT_SYMBOL(__dev_getfirstbyhwtype); 1002 1003 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 1004 { 1005 struct net_device *dev, *ret = NULL; 1006 1007 rcu_read_lock(); 1008 for_each_netdev_rcu(net, dev) 1009 if (dev->type == type) { 1010 dev_hold(dev); 1011 ret = dev; 1012 break; 1013 } 1014 rcu_read_unlock(); 1015 return ret; 1016 } 1017 EXPORT_SYMBOL(dev_getfirstbyhwtype); 1018 1019 /** 1020 * __dev_get_by_flags - find any device with given flags 1021 * @net: the applicable net namespace 1022 * @if_flags: IFF_* values 1023 * @mask: bitmask of bits in if_flags to check 1024 * 1025 * Search for any interface with the given flags. Returns NULL if a device 1026 * is not found or a pointer to the device. Must be called inside 1027 * rtnl_lock(), and result refcount is unchanged. 1028 */ 1029 1030 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, 1031 unsigned short mask) 1032 { 1033 struct net_device *dev, *ret; 1034 1035 ASSERT_RTNL(); 1036 1037 ret = NULL; 1038 for_each_netdev(net, dev) { 1039 if (((dev->flags ^ if_flags) & mask) == 0) { 1040 ret = dev; 1041 break; 1042 } 1043 } 1044 return ret; 1045 } 1046 EXPORT_SYMBOL(__dev_get_by_flags); 1047 1048 /** 1049 * dev_valid_name - check if name is okay for network device 1050 * @name: name string 1051 * 1052 * Network device names need to be valid file names to 1053 * to allow sysfs to work. We also disallow any kind of 1054 * whitespace. 
1055 */ 1056 bool dev_valid_name(const char *name) 1057 { 1058 if (*name == '\0') 1059 return false; 1060 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) 1061 return false; 1062 if (!strcmp(name, ".") || !strcmp(name, "..")) 1063 return false; 1064 1065 while (*name) { 1066 if (*name == '/' || *name == ':' || isspace(*name)) 1067 return false; 1068 name++; 1069 } 1070 return true; 1071 } 1072 EXPORT_SYMBOL(dev_valid_name); 1073 1074 /** 1075 * __dev_alloc_name - allocate a name for a device 1076 * @net: network namespace to allocate the device name in 1077 * @name: name format string 1078 * @buf: scratch buffer and result name string 1079 * 1080 * Passed a format string - eg "lt%d" it will try and find a suitable 1081 * id. It scans list of devices to build up a free map, then chooses 1082 * the first empty slot. The caller must hold the dev_base or rtnl lock 1083 * while allocating the name and adding the device in order to avoid 1084 * duplicates. 1085 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1086 * Returns the number of the unit assigned or a negative errno code. 1087 */ 1088 1089 static int __dev_alloc_name(struct net *net, const char *name, char *buf) 1090 { 1091 int i = 0; 1092 const char *p; 1093 const int max_netdevices = 8*PAGE_SIZE; 1094 unsigned long *inuse; 1095 struct net_device *d; 1096 1097 if (!dev_valid_name(name)) 1098 return -EINVAL; 1099 1100 p = strchr(name, '%'); 1101 if (p) { 1102 /* 1103 * Verify the string as this thing may have come from 1104 * the user. There must be either one "%d" and no other "%" 1105 * characters. 1106 */ 1107 if (p[1] != 'd' || strchr(p + 2, '%')) 1108 return -EINVAL; 1109 1110 /* Use one page as a bit array of possible slots */ 1111 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 1112 if (!inuse) 1113 return -ENOMEM; 1114 1115 for_each_netdev(net, d) { 1116 if (!sscanf(d->name, name, &i)) 1117 continue; 1118 if (i < 0 || i >= max_netdevices) 1119 continue; 1120 1121 /* avoid cases where sscanf is not exact inverse of printf */ 1122 snprintf(buf, IFNAMSIZ, name, i); 1123 if (!strncmp(buf, d->name, IFNAMSIZ)) 1124 set_bit(i, inuse); 1125 } 1126 1127 i = find_first_zero_bit(inuse, max_netdevices); 1128 free_page((unsigned long) inuse); 1129 } 1130 1131 snprintf(buf, IFNAMSIZ, name, i); 1132 if (!__dev_get_by_name(net, buf)) 1133 return i; 1134 1135 /* It is possible to run out of possible slots 1136 * when the name is long and there isn't enough space left 1137 * for the digits, or if all bits are used. 1138 */ 1139 return -ENFILE; 1140 } 1141 1142 static int dev_alloc_name_ns(struct net *net, 1143 struct net_device *dev, 1144 const char *name) 1145 { 1146 char buf[IFNAMSIZ]; 1147 int ret; 1148 1149 BUG_ON(!net); 1150 ret = __dev_alloc_name(net, name, buf); 1151 if (ret >= 0) 1152 strlcpy(dev->name, buf, IFNAMSIZ); 1153 return ret; 1154 } 1155 1156 /** 1157 * dev_alloc_name - allocate a name for a device 1158 * @dev: device 1159 * @name: name format string 1160 * 1161 * Passed a format string - eg "lt%d" it will try and find a suitable 1162 * id. It scans list of devices to build up a free map, then chooses 1163 * the first empty slot. The caller must hold the dev_base or rtnl lock 1164 * while allocating the name and adding the device in order to avoid 1165 * duplicates. 1166 * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 1167 * Returns the number of the unit assigned or a negative errno code. 
1168 */ 1169 1170 int dev_alloc_name(struct net_device *dev, const char *name) 1171 { 1172 return dev_alloc_name_ns(dev_net(dev), dev, name); 1173 } 1174 EXPORT_SYMBOL(dev_alloc_name); 1175 1176 static int dev_get_valid_name(struct net *net, struct net_device *dev, 1177 const char *name) 1178 { 1179 BUG_ON(!net); 1180 1181 if (!dev_valid_name(name)) 1182 return -EINVAL; 1183 1184 if (strchr(name, '%')) 1185 return dev_alloc_name_ns(net, dev, name); 1186 else if (__dev_get_by_name(net, name)) 1187 return -EEXIST; 1188 else if (dev->name != name) 1189 strlcpy(dev->name, name, IFNAMSIZ); 1190 1191 return 0; 1192 } 1193 1194 /** 1195 * dev_change_name - change name of a device 1196 * @dev: device 1197 * @newname: name (or format string) must be at least IFNAMSIZ 1198 * 1199 * Change name of a device, can pass format strings "eth%d". 1200 * for wildcarding. 1201 */ 1202 int dev_change_name(struct net_device *dev, const char *newname) 1203 { 1204 unsigned char old_assign_type; 1205 char oldname[IFNAMSIZ]; 1206 int err = 0; 1207 int ret; 1208 struct net *net; 1209 1210 ASSERT_RTNL(); 1211 BUG_ON(!dev_net(dev)); 1212 1213 net = dev_net(dev); 1214 1215 /* Some auto-enslaved devices e.g. failover slaves are 1216 * special, as userspace might rename the device after 1217 * the interface had been brought up and running since 1218 * the point kernel initiated auto-enslavement. Allow 1219 * live name change even when these slave devices are 1220 * up and running. 1221 * 1222 * Typically, users of these auto-enslaving devices 1223 * don't actually care about slave name change, as 1224 * they are supposed to operate on master interface 1225 * directly. 1226 */ 1227 if (dev->flags & IFF_UP && 1228 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) 1229 return -EBUSY; 1230 1231 write_seqcount_begin(&devnet_rename_seq); 1232 1233 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { 1234 write_seqcount_end(&devnet_rename_seq); 1235 return 0; 1236 } 1237 1238 memcpy(oldname, dev->name, IFNAMSIZ); 1239 1240 err = dev_get_valid_name(net, dev, newname); 1241 if (err < 0) { 1242 write_seqcount_end(&devnet_rename_seq); 1243 return err; 1244 } 1245 1246 if (oldname[0] && !strchr(oldname, '%')) 1247 netdev_info(dev, "renamed from %s\n", oldname); 1248 1249 old_assign_type = dev->name_assign_type; 1250 dev->name_assign_type = NET_NAME_RENAMED; 1251 1252 rollback: 1253 ret = device_rename(&dev->dev, dev->name); 1254 if (ret) { 1255 memcpy(dev->name, oldname, IFNAMSIZ); 1256 dev->name_assign_type = old_assign_type; 1257 write_seqcount_end(&devnet_rename_seq); 1258 return ret; 1259 } 1260 1261 write_seqcount_end(&devnet_rename_seq); 1262 1263 netdev_adjacent_rename_links(dev, oldname); 1264 1265 write_lock_bh(&dev_base_lock); 1266 netdev_name_node_del(dev->name_node); 1267 write_unlock_bh(&dev_base_lock); 1268 1269 synchronize_rcu(); 1270 1271 write_lock_bh(&dev_base_lock); 1272 netdev_name_node_add(net, dev->name_node); 1273 write_unlock_bh(&dev_base_lock); 1274 1275 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); 1276 ret = notifier_to_errno(ret); 1277 1278 if (ret) { 1279 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1280 if (err >= 0) { 1281 err = ret; 1282 write_seqcount_begin(&devnet_rename_seq); 1283 memcpy(dev->name, oldname, IFNAMSIZ); 1284 memcpy(oldname, newname, IFNAMSIZ); 1285 dev->name_assign_type = old_assign_type; 1286 old_assign_type = NET_NAME_RENAMED; 1287 goto rollback; 1288 } else { 1289 pr_err("%s: name change rollback failed: %d\n", 1290 dev->name, ret); 1291 } 1292 } 1293 1294 
return err; 1295 } 1296 1297 /** 1298 * dev_set_alias - change ifalias of a device 1299 * @dev: device 1300 * @alias: name up to IFALIASZ 1301 * @len: limit of bytes to copy from info 1302 * 1303 * Set ifalias for a device, 1304 */ 1305 int dev_set_alias(struct net_device *dev, const char *alias, size_t len) 1306 { 1307 struct dev_ifalias *new_alias = NULL; 1308 1309 if (len >= IFALIASZ) 1310 return -EINVAL; 1311 1312 if (len) { 1313 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); 1314 if (!new_alias) 1315 return -ENOMEM; 1316 1317 memcpy(new_alias->ifalias, alias, len); 1318 new_alias->ifalias[len] = 0; 1319 } 1320 1321 mutex_lock(&ifalias_mutex); 1322 new_alias = rcu_replace_pointer(dev->ifalias, new_alias, 1323 mutex_is_locked(&ifalias_mutex)); 1324 mutex_unlock(&ifalias_mutex); 1325 1326 if (new_alias) 1327 kfree_rcu(new_alias, rcuhead); 1328 1329 return len; 1330 } 1331 EXPORT_SYMBOL(dev_set_alias); 1332 1333 /** 1334 * dev_get_alias - get ifalias of a device 1335 * @dev: device 1336 * @name: buffer to store name of ifalias 1337 * @len: size of buffer 1338 * 1339 * get ifalias for a device. Caller must make sure dev cannot go 1340 * away, e.g. rcu read lock or own a reference count to device. 1341 */ 1342 int dev_get_alias(const struct net_device *dev, char *name, size_t len) 1343 { 1344 const struct dev_ifalias *alias; 1345 int ret = 0; 1346 1347 rcu_read_lock(); 1348 alias = rcu_dereference(dev->ifalias); 1349 if (alias) 1350 ret = snprintf(name, len, "%s", alias->ifalias); 1351 rcu_read_unlock(); 1352 1353 return ret; 1354 } 1355 1356 /** 1357 * netdev_features_change - device changes features 1358 * @dev: device to cause notification 1359 * 1360 * Called to indicate a device has changed features. 1361 */ 1362 void netdev_features_change(struct net_device *dev) 1363 { 1364 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); 1365 } 1366 EXPORT_SYMBOL(netdev_features_change); 1367 1368 /** 1369 * netdev_state_change - device changes state 1370 * @dev: device to cause notification 1371 * 1372 * Called to indicate a device has changed state. This function calls 1373 * the notifier chains for netdev_chain and sends a NEWLINK message 1374 * to the routing socket. 1375 */ 1376 void netdev_state_change(struct net_device *dev) 1377 { 1378 if (dev->flags & IFF_UP) { 1379 struct netdev_notifier_change_info change_info = { 1380 .info.dev = dev, 1381 }; 1382 1383 call_netdevice_notifiers_info(NETDEV_CHANGE, 1384 &change_info.info); 1385 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1386 } 1387 } 1388 EXPORT_SYMBOL(netdev_state_change); 1389 1390 /** 1391 * netdev_notify_peers - notify network peers about existence of @dev 1392 * @dev: network device 1393 * 1394 * Generate traffic such that interested network peers are aware of 1395 * @dev, such as by generating a gratuitous ARP. This may be used when 1396 * a device wants to inform the rest of the network about some sort of 1397 * reconfiguration such as a failover event or virtual machine 1398 * migration. 
1399 */ 1400 void netdev_notify_peers(struct net_device *dev) 1401 { 1402 rtnl_lock(); 1403 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1404 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); 1405 rtnl_unlock(); 1406 } 1407 EXPORT_SYMBOL(netdev_notify_peers); 1408 1409 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1410 { 1411 const struct net_device_ops *ops = dev->netdev_ops; 1412 int ret; 1413 1414 ASSERT_RTNL(); 1415 1416 if (!netif_device_present(dev)) 1417 return -ENODEV; 1418 1419 /* Block netpoll from trying to do any rx path servicing. 1420 * If we don't do this there is a chance ndo_poll_controller 1421 * or ndo_poll may be running while we open the device 1422 */ 1423 netpoll_poll_disable(dev); 1424 1425 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); 1426 ret = notifier_to_errno(ret); 1427 if (ret) 1428 return ret; 1429 1430 set_bit(__LINK_STATE_START, &dev->state); 1431 1432 if (ops->ndo_validate_addr) 1433 ret = ops->ndo_validate_addr(dev); 1434 1435 if (!ret && ops->ndo_open) 1436 ret = ops->ndo_open(dev); 1437 1438 netpoll_poll_enable(dev); 1439 1440 if (ret) 1441 clear_bit(__LINK_STATE_START, &dev->state); 1442 else { 1443 dev->flags |= IFF_UP; 1444 dev_set_rx_mode(dev); 1445 dev_activate(dev); 1446 add_device_randomness(dev->dev_addr, dev->addr_len); 1447 } 1448 1449 return ret; 1450 } 1451 1452 /** 1453 * dev_open - prepare an interface for use. 1454 * @dev: device to open 1455 * @extack: netlink extended ack 1456 * 1457 * Takes a device from down to up state. The device's private open 1458 * function is invoked and then the multicast lists are loaded. Finally 1459 * the device is moved into the up state and a %NETDEV_UP message is 1460 * sent to the netdev notifier chain. 1461 * 1462 * Calling this function on an active interface is a nop. On a failure 1463 * a negative errno code is returned. 1464 */ 1465 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) 1466 { 1467 int ret; 1468 1469 if (dev->flags & IFF_UP) 1470 return 0; 1471 1472 ret = __dev_open(dev, extack); 1473 if (ret < 0) 1474 return ret; 1475 1476 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1477 call_netdevice_notifiers(NETDEV_UP, dev); 1478 1479 return ret; 1480 } 1481 EXPORT_SYMBOL(dev_open); 1482 1483 static void __dev_close_many(struct list_head *head) 1484 { 1485 struct net_device *dev; 1486 1487 ASSERT_RTNL(); 1488 might_sleep(); 1489 1490 list_for_each_entry(dev, head, close_list) { 1491 /* Temporarily disable netpoll until the interface is down */ 1492 netpoll_poll_disable(dev); 1493 1494 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1495 1496 clear_bit(__LINK_STATE_START, &dev->state); 1497 1498 /* Synchronize to scheduled poll. We cannot touch poll list, it 1499 * can be even on different cpu. So just clear netif_running(). 1500 * 1501 * dev->stop() will invoke napi_disable() on all of it's 1502 * napi_struct instances on this device. 1503 */ 1504 smp_mb__after_atomic(); /* Commit netif_running(). */ 1505 } 1506 1507 dev_deactivate_many(head); 1508 1509 list_for_each_entry(dev, head, close_list) { 1510 const struct net_device_ops *ops = dev->netdev_ops; 1511 1512 /* 1513 * Call the device specific close. This cannot fail. 1514 * Only if device is UP 1515 * 1516 * We allow it to be called even after a DETACH hot-plug 1517 * event. 
1518 */ 1519 if (ops->ndo_stop) 1520 ops->ndo_stop(dev); 1521 1522 dev->flags &= ~IFF_UP; 1523 netpoll_poll_enable(dev); 1524 } 1525 } 1526 1527 static void __dev_close(struct net_device *dev) 1528 { 1529 LIST_HEAD(single); 1530 1531 list_add(&dev->close_list, &single); 1532 __dev_close_many(&single); 1533 list_del(&single); 1534 } 1535 1536 void dev_close_many(struct list_head *head, bool unlink) 1537 { 1538 struct net_device *dev, *tmp; 1539 1540 /* Remove the devices that don't need to be closed */ 1541 list_for_each_entry_safe(dev, tmp, head, close_list) 1542 if (!(dev->flags & IFF_UP)) 1543 list_del_init(&dev->close_list); 1544 1545 __dev_close_many(head); 1546 1547 list_for_each_entry_safe(dev, tmp, head, close_list) { 1548 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); 1549 call_netdevice_notifiers(NETDEV_DOWN, dev); 1550 if (unlink) 1551 list_del_init(&dev->close_list); 1552 } 1553 } 1554 EXPORT_SYMBOL(dev_close_many); 1555 1556 /** 1557 * dev_close - shutdown an interface. 1558 * @dev: device to shutdown 1559 * 1560 * This function moves an active device into down state. A 1561 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1562 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1563 * chain. 1564 */ 1565 void dev_close(struct net_device *dev) 1566 { 1567 if (dev->flags & IFF_UP) { 1568 LIST_HEAD(single); 1569 1570 list_add(&dev->close_list, &single); 1571 dev_close_many(&single, true); 1572 list_del(&single); 1573 } 1574 } 1575 EXPORT_SYMBOL(dev_close); 1576 1577 1578 /** 1579 * dev_disable_lro - disable Large Receive Offload on a device 1580 * @dev: device 1581 * 1582 * Disable Large Receive Offload (LRO) on a net device. Must be 1583 * called under RTNL. This is needed if received packets may be 1584 * forwarded to another interface. 1585 */ 1586 void dev_disable_lro(struct net_device *dev) 1587 { 1588 struct net_device *lower_dev; 1589 struct list_head *iter; 1590 1591 dev->wanted_features &= ~NETIF_F_LRO; 1592 netdev_update_features(dev); 1593 1594 if (unlikely(dev->features & NETIF_F_LRO)) 1595 netdev_WARN(dev, "failed to disable LRO!\n"); 1596 1597 netdev_for_each_lower_dev(dev, lower_dev, iter) 1598 dev_disable_lro(lower_dev); 1599 } 1600 EXPORT_SYMBOL(dev_disable_lro); 1601 1602 /** 1603 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device 1604 * @dev: device 1605 * 1606 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be 1607 * called under RTNL. This is needed if Generic XDP is installed on 1608 * the device. 
1609 */ 1610 static void dev_disable_gro_hw(struct net_device *dev) 1611 { 1612 dev->wanted_features &= ~NETIF_F_GRO_HW; 1613 netdev_update_features(dev); 1614 1615 if (unlikely(dev->features & NETIF_F_GRO_HW)) 1616 netdev_WARN(dev, "failed to disable GRO_HW!\n"); 1617 } 1618 1619 const char *netdev_cmd_to_name(enum netdev_cmd cmd) 1620 { 1621 #define N(val) \ 1622 case NETDEV_##val: \ 1623 return "NETDEV_" __stringify(val); 1624 switch (cmd) { 1625 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) 1626 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) 1627 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) 1628 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) 1629 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) 1630 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) 1631 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1632 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1633 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1634 N(PRE_CHANGEADDR) 1635 } 1636 #undef N 1637 return "UNKNOWN_NETDEV_EVENT"; 1638 } 1639 EXPORT_SYMBOL_GPL(netdev_cmd_to_name); 1640 1641 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, 1642 struct net_device *dev) 1643 { 1644 struct netdev_notifier_info info = { 1645 .dev = dev, 1646 }; 1647 1648 return nb->notifier_call(nb, val, &info); 1649 } 1650 1651 static int call_netdevice_register_notifiers(struct notifier_block *nb, 1652 struct net_device *dev) 1653 { 1654 int err; 1655 1656 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1657 err = notifier_to_errno(err); 1658 if (err) 1659 return err; 1660 1661 if (!(dev->flags & IFF_UP)) 1662 return 0; 1663 1664 call_netdevice_notifier(nb, NETDEV_UP, dev); 1665 return 0; 1666 } 1667 1668 static void call_netdevice_unregister_notifiers(struct notifier_block *nb, 1669 struct net_device *dev) 1670 { 1671 if (dev->flags & IFF_UP) { 1672 call_netdevice_notifier(nb, NETDEV_GOING_DOWN, 1673 dev); 1674 call_netdevice_notifier(nb, NETDEV_DOWN, dev); 1675 } 1676 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1677 } 1678 1679 static int call_netdevice_register_net_notifiers(struct notifier_block *nb, 1680 struct net *net) 1681 { 1682 struct net_device *dev; 1683 int err; 1684 1685 for_each_netdev(net, dev) { 1686 err = call_netdevice_register_notifiers(nb, dev); 1687 if (err) 1688 goto rollback; 1689 } 1690 return 0; 1691 1692 rollback: 1693 for_each_netdev_continue_reverse(net, dev) 1694 call_netdevice_unregister_notifiers(nb, dev); 1695 return err; 1696 } 1697 1698 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb, 1699 struct net *net) 1700 { 1701 struct net_device *dev; 1702 1703 for_each_netdev(net, dev) 1704 call_netdevice_unregister_notifiers(nb, dev); 1705 } 1706 1707 static int dev_boot_phase = 1; 1708 1709 /** 1710 * register_netdevice_notifier - register a network notifier block 1711 * @nb: notifier 1712 * 1713 * Register a notifier to be called when network device events occur. 1714 * The notifier passed is linked into the kernel structures and must 1715 * not be reused until it has been unregistered. A negative errno code 1716 * is returned on a failure. 1717 * 1718 * When registered all registration and up events are replayed 1719 * to the new notifier to allow device to have a race free 1720 * view of the network device list. 
1721 */ 1722 1723 int register_netdevice_notifier(struct notifier_block *nb) 1724 { 1725 struct net *net; 1726 int err; 1727 1728 /* Close race with setup_net() and cleanup_net() */ 1729 down_write(&pernet_ops_rwsem); 1730 rtnl_lock(); 1731 err = raw_notifier_chain_register(&netdev_chain, nb); 1732 if (err) 1733 goto unlock; 1734 if (dev_boot_phase) 1735 goto unlock; 1736 for_each_net(net) { 1737 err = call_netdevice_register_net_notifiers(nb, net); 1738 if (err) 1739 goto rollback; 1740 } 1741 1742 unlock: 1743 rtnl_unlock(); 1744 up_write(&pernet_ops_rwsem); 1745 return err; 1746 1747 rollback: 1748 for_each_net_continue_reverse(net) 1749 call_netdevice_unregister_net_notifiers(nb, net); 1750 1751 raw_notifier_chain_unregister(&netdev_chain, nb); 1752 goto unlock; 1753 } 1754 EXPORT_SYMBOL(register_netdevice_notifier); 1755 1756 /** 1757 * unregister_netdevice_notifier - unregister a network notifier block 1758 * @nb: notifier 1759 * 1760 * Unregister a notifier previously registered by 1761 * register_netdevice_notifier(). The notifier is unlinked into the 1762 * kernel structures and may then be reused. A negative errno code 1763 * is returned on a failure. 1764 * 1765 * After unregistering unregister and down device events are synthesized 1766 * for all devices on the device list to the removed notifier to remove 1767 * the need for special case cleanup code. 1768 */ 1769 1770 int unregister_netdevice_notifier(struct notifier_block *nb) 1771 { 1772 struct net *net; 1773 int err; 1774 1775 /* Close race with setup_net() and cleanup_net() */ 1776 down_write(&pernet_ops_rwsem); 1777 rtnl_lock(); 1778 err = raw_notifier_chain_unregister(&netdev_chain, nb); 1779 if (err) 1780 goto unlock; 1781 1782 for_each_net(net) 1783 call_netdevice_unregister_net_notifiers(nb, net); 1784 1785 unlock: 1786 rtnl_unlock(); 1787 up_write(&pernet_ops_rwsem); 1788 return err; 1789 } 1790 EXPORT_SYMBOL(unregister_netdevice_notifier); 1791 1792 static int __register_netdevice_notifier_net(struct net *net, 1793 struct notifier_block *nb, 1794 bool ignore_call_fail) 1795 { 1796 int err; 1797 1798 err = raw_notifier_chain_register(&net->netdev_chain, nb); 1799 if (err) 1800 return err; 1801 if (dev_boot_phase) 1802 return 0; 1803 1804 err = call_netdevice_register_net_notifiers(nb, net); 1805 if (err && !ignore_call_fail) 1806 goto chain_unregister; 1807 1808 return 0; 1809 1810 chain_unregister: 1811 raw_notifier_chain_unregister(&net->netdev_chain, nb); 1812 return err; 1813 } 1814 1815 static int __unregister_netdevice_notifier_net(struct net *net, 1816 struct notifier_block *nb) 1817 { 1818 int err; 1819 1820 err = raw_notifier_chain_unregister(&net->netdev_chain, nb); 1821 if (err) 1822 return err; 1823 1824 call_netdevice_unregister_net_notifiers(nb, net); 1825 return 0; 1826 } 1827 1828 /** 1829 * register_netdevice_notifier_net - register a per-netns network notifier block 1830 * @net: network namespace 1831 * @nb: notifier 1832 * 1833 * Register a notifier to be called when network device events occur. 1834 * The notifier passed is linked into the kernel structures and must 1835 * not be reused until it has been unregistered. A negative errno code 1836 * is returned on a failure. 1837 * 1838 * When registered all registration and up events are replayed 1839 * to the new notifier to allow device to have a race free 1840 * view of the network device list. 
1841 */ 1842 1843 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) 1844 { 1845 int err; 1846 1847 rtnl_lock(); 1848 err = __register_netdevice_notifier_net(net, nb, false); 1849 rtnl_unlock(); 1850 return err; 1851 } 1852 EXPORT_SYMBOL(register_netdevice_notifier_net); 1853 1854 /** 1855 * unregister_netdevice_notifier_net - unregister a per-netns 1856 * network notifier block 1857 * @net: network namespace 1858 * @nb: notifier 1859 * 1860 * Unregister a notifier previously registered by 1861 * register_netdevice_notifier(). The notifier is unlinked into the 1862 * kernel structures and may then be reused. A negative errno code 1863 * is returned on a failure. 1864 * 1865 * After unregistering unregister and down device events are synthesized 1866 * for all devices on the device list to the removed notifier to remove 1867 * the need for special case cleanup code. 1868 */ 1869 1870 int unregister_netdevice_notifier_net(struct net *net, 1871 struct notifier_block *nb) 1872 { 1873 int err; 1874 1875 rtnl_lock(); 1876 err = __unregister_netdevice_notifier_net(net, nb); 1877 rtnl_unlock(); 1878 return err; 1879 } 1880 EXPORT_SYMBOL(unregister_netdevice_notifier_net); 1881 1882 int register_netdevice_notifier_dev_net(struct net_device *dev, 1883 struct notifier_block *nb, 1884 struct netdev_net_notifier *nn) 1885 { 1886 int err; 1887 1888 rtnl_lock(); 1889 err = __register_netdevice_notifier_net(dev_net(dev), nb, false); 1890 if (!err) { 1891 nn->nb = nb; 1892 list_add(&nn->list, &dev->net_notifier_list); 1893 } 1894 rtnl_unlock(); 1895 return err; 1896 } 1897 EXPORT_SYMBOL(register_netdevice_notifier_dev_net); 1898 1899 int unregister_netdevice_notifier_dev_net(struct net_device *dev, 1900 struct notifier_block *nb, 1901 struct netdev_net_notifier *nn) 1902 { 1903 int err; 1904 1905 rtnl_lock(); 1906 list_del(&nn->list); 1907 err = __unregister_netdevice_notifier_net(dev_net(dev), nb); 1908 rtnl_unlock(); 1909 return err; 1910 } 1911 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); 1912 1913 static void move_netdevice_notifiers_dev_net(struct net_device *dev, 1914 struct net *net) 1915 { 1916 struct netdev_net_notifier *nn; 1917 1918 list_for_each_entry(nn, &dev->net_notifier_list, list) { 1919 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb); 1920 __register_netdevice_notifier_net(net, nn->nb, true); 1921 } 1922 } 1923 1924 /** 1925 * call_netdevice_notifiers_info - call all network notifier blocks 1926 * @val: value passed unmodified to notifier function 1927 * @info: notifier information data 1928 * 1929 * Call all network notifier blocks. Parameters and return value 1930 * are as for raw_notifier_call_chain(). 1931 */ 1932 1933 static int call_netdevice_notifiers_info(unsigned long val, 1934 struct netdev_notifier_info *info) 1935 { 1936 struct net *net = dev_net(info->dev); 1937 int ret; 1938 1939 ASSERT_RTNL(); 1940 1941 /* Run per-netns notifier block chain first, then run the global one. 1942 * Hopefully, one day, the global one is going to be removed after 1943 * all notifier block registrators get converted to be per-netns. 
1944 */ 1945 ret = raw_notifier_call_chain(&net->netdev_chain, val, info); 1946 if (ret & NOTIFY_STOP_MASK) 1947 return ret; 1948 return raw_notifier_call_chain(&netdev_chain, val, info); 1949 } 1950 1951 static int call_netdevice_notifiers_extack(unsigned long val, 1952 struct net_device *dev, 1953 struct netlink_ext_ack *extack) 1954 { 1955 struct netdev_notifier_info info = { 1956 .dev = dev, 1957 .extack = extack, 1958 }; 1959 1960 return call_netdevice_notifiers_info(val, &info); 1961 } 1962 1963 /** 1964 * call_netdevice_notifiers - call all network notifier blocks 1965 * @val: value passed unmodified to notifier function 1966 * @dev: net_device pointer passed unmodified to notifier function 1967 * 1968 * Call all network notifier blocks. Parameters and return value 1969 * are as for raw_notifier_call_chain(). 1970 */ 1971 1972 int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1973 { 1974 return call_netdevice_notifiers_extack(val, dev, NULL); 1975 } 1976 EXPORT_SYMBOL(call_netdevice_notifiers); 1977 1978 /** 1979 * call_netdevice_notifiers_mtu - call all network notifier blocks 1980 * @val: value passed unmodified to notifier function 1981 * @dev: net_device pointer passed unmodified to notifier function 1982 * @arg: additional u32 argument passed to the notifier function 1983 * 1984 * Call all network notifier blocks. Parameters and return value 1985 * are as for raw_notifier_call_chain(). 1986 */ 1987 static int call_netdevice_notifiers_mtu(unsigned long val, 1988 struct net_device *dev, u32 arg) 1989 { 1990 struct netdev_notifier_info_ext info = { 1991 .info.dev = dev, 1992 .ext.mtu = arg, 1993 }; 1994 1995 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); 1996 1997 return call_netdevice_notifiers_info(val, &info.info); 1998 } 1999 2000 #ifdef CONFIG_NET_INGRESS 2001 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); 2002 2003 void net_inc_ingress_queue(void) 2004 { 2005 static_branch_inc(&ingress_needed_key); 2006 } 2007 EXPORT_SYMBOL_GPL(net_inc_ingress_queue); 2008 2009 void net_dec_ingress_queue(void) 2010 { 2011 static_branch_dec(&ingress_needed_key); 2012 } 2013 EXPORT_SYMBOL_GPL(net_dec_ingress_queue); 2014 #endif 2015 2016 #ifdef CONFIG_NET_EGRESS 2017 static DEFINE_STATIC_KEY_FALSE(egress_needed_key); 2018 2019 void net_inc_egress_queue(void) 2020 { 2021 static_branch_inc(&egress_needed_key); 2022 } 2023 EXPORT_SYMBOL_GPL(net_inc_egress_queue); 2024 2025 void net_dec_egress_queue(void) 2026 { 2027 static_branch_dec(&egress_needed_key); 2028 } 2029 EXPORT_SYMBOL_GPL(net_dec_egress_queue); 2030 #endif 2031 2032 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); 2033 #ifdef CONFIG_JUMP_LABEL 2034 static atomic_t netstamp_needed_deferred; 2035 static atomic_t netstamp_wanted; 2036 static void netstamp_clear(struct work_struct *work) 2037 { 2038 int deferred = atomic_xchg(&netstamp_needed_deferred, 0); 2039 int wanted; 2040 2041 wanted = atomic_add_return(deferred, &netstamp_wanted); 2042 if (wanted > 0) 2043 static_branch_enable(&netstamp_needed_key); 2044 else 2045 static_branch_disable(&netstamp_needed_key); 2046 } 2047 static DECLARE_WORK(netstamp_work, netstamp_clear); 2048 #endif 2049 2050 void net_enable_timestamp(void) 2051 { 2052 #ifdef CONFIG_JUMP_LABEL 2053 int wanted; 2054 2055 while (1) { 2056 wanted = atomic_read(&netstamp_wanted); 2057 if (wanted <= 0) 2058 break; 2059 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) 2060 return; 2061 } 2062 atomic_inc(&netstamp_needed_deferred); 2063 
schedule_work(&netstamp_work); 2064 #else 2065 static_branch_inc(&netstamp_needed_key); 2066 #endif 2067 } 2068 EXPORT_SYMBOL(net_enable_timestamp); 2069 2070 void net_disable_timestamp(void) 2071 { 2072 #ifdef CONFIG_JUMP_LABEL 2073 int wanted; 2074 2075 while (1) { 2076 wanted = atomic_read(&netstamp_wanted); 2077 if (wanted <= 1) 2078 break; 2079 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) 2080 return; 2081 } 2082 atomic_dec(&netstamp_needed_deferred); 2083 schedule_work(&netstamp_work); 2084 #else 2085 static_branch_dec(&netstamp_needed_key); 2086 #endif 2087 } 2088 EXPORT_SYMBOL(net_disable_timestamp); 2089 2090 static inline void net_timestamp_set(struct sk_buff *skb) 2091 { 2092 skb->tstamp = 0; 2093 if (static_branch_unlikely(&netstamp_needed_key)) 2094 __net_timestamp(skb); 2095 } 2096 2097 #define net_timestamp_check(COND, SKB) \ 2098 if (static_branch_unlikely(&netstamp_needed_key)) { \ 2099 if ((COND) && !(SKB)->tstamp) \ 2100 __net_timestamp(SKB); \ 2101 } \ 2102 2103 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) 2104 { 2105 unsigned int len; 2106 2107 if (!(dev->flags & IFF_UP)) 2108 return false; 2109 2110 len = dev->mtu + dev->hard_header_len + VLAN_HLEN; 2111 if (skb->len <= len) 2112 return true; 2113 2114 /* if TSO is enabled, we don't care about the length as the packet 2115 * could be forwarded without being segmented beforehand 2116 */ 2117 if (skb_is_gso(skb)) 2118 return true; 2119 2120 return false; 2121 } 2122 EXPORT_SYMBOL_GPL(is_skb_forwardable); 2123 2124 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2125 { 2126 int ret = ____dev_forward_skb(dev, skb); 2127 2128 if (likely(!ret)) { 2129 skb->protocol = eth_type_trans(skb, dev); 2130 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 2131 } 2132 2133 return ret; 2134 } 2135 EXPORT_SYMBOL_GPL(__dev_forward_skb); 2136 2137 /** 2138 * dev_forward_skb - loopback an skb to another netif 2139 * 2140 * @dev: destination network device 2141 * @skb: buffer to forward 2142 * 2143 * return values: 2144 * NET_RX_SUCCESS (no congestion) 2145 * NET_RX_DROP (packet was dropped, but freed) 2146 * 2147 * dev_forward_skb can be used for injecting an skb from the 2148 * start_xmit function of one device into the receive queue 2149 * of another device. 2150 * 2151 * The receiving device may be in another namespace, so 2152 * we have to clear all information in the skb that could 2153 * impact namespace isolation.
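 *
 * A sketch of a typical call site in a pair-style driver (hypothetical
 * code; my_get_peer() is an illustrative helper, not a real API):
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}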
2154 */ 2155 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 2156 { 2157 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); 2158 } 2159 EXPORT_SYMBOL_GPL(dev_forward_skb); 2160 2161 static inline int deliver_skb(struct sk_buff *skb, 2162 struct packet_type *pt_prev, 2163 struct net_device *orig_dev) 2164 { 2165 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 2166 return -ENOMEM; 2167 refcount_inc(&skb->users); 2168 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2169 } 2170 2171 static inline void deliver_ptype_list_skb(struct sk_buff *skb, 2172 struct packet_type **pt, 2173 struct net_device *orig_dev, 2174 __be16 type, 2175 struct list_head *ptype_list) 2176 { 2177 struct packet_type *ptype, *pt_prev = *pt; 2178 2179 list_for_each_entry_rcu(ptype, ptype_list, list) { 2180 if (ptype->type != type) 2181 continue; 2182 if (pt_prev) 2183 deliver_skb(skb, pt_prev, orig_dev); 2184 pt_prev = ptype; 2185 } 2186 *pt = pt_prev; 2187 } 2188 2189 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) 2190 { 2191 if (!ptype->af_packet_priv || !skb->sk) 2192 return false; 2193 2194 if (ptype->id_match) 2195 return ptype->id_match(ptype, skb->sk); 2196 else if ((struct sock *)ptype->af_packet_priv == skb->sk) 2197 return true; 2198 2199 return false; 2200 } 2201 2202 /** 2203 * dev_nit_active - return true if any network interface taps are in use 2204 * 2205 * @dev: network device to check for the presence of taps 2206 */ 2207 bool dev_nit_active(struct net_device *dev) 2208 { 2209 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); 2210 } 2211 EXPORT_SYMBOL_GPL(dev_nit_active); 2212 2213 /* 2214 * Support routine. Sends outgoing frames to any network 2215 * taps currently in use. 2216 */ 2217 2218 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) 2219 { 2220 struct packet_type *ptype; 2221 struct sk_buff *skb2 = NULL; 2222 struct packet_type *pt_prev = NULL; 2223 struct list_head *ptype_list = &ptype_all; 2224 2225 rcu_read_lock(); 2226 again: 2227 list_for_each_entry_rcu(ptype, ptype_list, list) { 2228 if (ptype->ignore_outgoing) 2229 continue; 2230 2231 /* Never send packets back to the socket 2232 * they originated from - MvS (miquels@drinkel.ow.org) 2233 */ 2234 if (skb_loop_sk(ptype, skb)) 2235 continue; 2236 2237 if (pt_prev) { 2238 deliver_skb(skb2, pt_prev, skb->dev); 2239 pt_prev = ptype; 2240 continue; 2241 } 2242 2243 /* need to clone skb, done only once */ 2244 skb2 = skb_clone(skb, GFP_ATOMIC); 2245 if (!skb2) 2246 goto out_unlock; 2247 2248 net_timestamp_set(skb2); 2249 2250 /* skb->nh should be correctly 2251 * set by sender, so that the second statement is 2252 * just protection against buggy protocols. 
2253 */ 2254 skb_reset_mac_header(skb2); 2255 2256 if (skb_network_header(skb2) < skb2->data || 2257 skb_network_header(skb2) > skb_tail_pointer(skb2)) { 2258 net_crit_ratelimited("protocol %04x is buggy, dev %s\n", 2259 ntohs(skb2->protocol), 2260 dev->name); 2261 skb_reset_network_header(skb2); 2262 } 2263 2264 skb2->transport_header = skb2->network_header; 2265 skb2->pkt_type = PACKET_OUTGOING; 2266 pt_prev = ptype; 2267 } 2268 2269 if (ptype_list == &ptype_all) { 2270 ptype_list = &dev->ptype_all; 2271 goto again; 2272 } 2273 out_unlock: 2274 if (pt_prev) { 2275 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) 2276 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); 2277 else 2278 kfree_skb(skb2); 2279 } 2280 rcu_read_unlock(); 2281 } 2282 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); 2283 2284 /** 2285 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change 2286 * @dev: Network device 2287 * @txq: number of queues available 2288 * 2289 * If real_num_tx_queues is changed the tc mappings may no longer be 2290 * valid. To resolve this, verify that each tc mapping remains valid, 2291 * and if not, zero the mapping. If no priorities map to a given 2292 * offset/count pair it will no longer be used. In the worst case, if 2293 * TC0 becomes invalid nothing can be done, so priority mappings are 2294 * disabled entirely. It is expected that drivers will fix this mapping 2295 * if they can before calling netif_set_real_num_tx_queues. 2296 */ 2297 static void netif_setup_tc(struct net_device *dev, unsigned int txq) 2298 { 2299 int i; 2300 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2301 2302 /* If TC0 is invalidated disable TC mapping */ 2303 if (tc->offset + tc->count > txq) { 2304 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); 2305 dev->num_tc = 0; 2306 return; 2307 } 2308 2309 /* Invalidated prio to tc mappings set to TC0 */ 2310 for (i = 1; i < TC_BITMASK + 1; i++) { 2311 int q = netdev_get_prio_tc_map(dev, i); 2312 2313 tc = &dev->tc_to_txq[q]; 2314 if (tc->offset + tc->count > txq) { 2315 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. 
Setting map to 0\n", 2316 i, q); 2317 netdev_set_prio_tc_map(dev, i, 0); 2318 } 2319 } 2320 } 2321 2322 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) 2323 { 2324 if (dev->num_tc) { 2325 struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; 2326 int i; 2327 2328 /* walk through the TCs and see if it falls into any of them */ 2329 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { 2330 if ((txq - tc->offset) < tc->count) 2331 return i; 2332 } 2333 2334 /* didn't find it, just return -1 to indicate no match */ 2335 return -1; 2336 } 2337 2338 return 0; 2339 } 2340 EXPORT_SYMBOL(netdev_txq_to_tc); 2341 2342 #ifdef CONFIG_XPS 2343 struct static_key xps_needed __read_mostly; 2344 EXPORT_SYMBOL(xps_needed); 2345 struct static_key xps_rxqs_needed __read_mostly; 2346 EXPORT_SYMBOL(xps_rxqs_needed); 2347 static DEFINE_MUTEX(xps_map_mutex); 2348 #define xmap_dereference(P) \ 2349 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 2350 2351 static bool remove_xps_queue(struct xps_dev_maps *dev_maps, 2352 int tci, u16 index) 2353 { 2354 struct xps_map *map = NULL; 2355 int pos; 2356 2357 if (dev_maps) 2358 map = xmap_dereference(dev_maps->attr_map[tci]); 2359 if (!map) 2360 return false; 2361 2362 for (pos = map->len; pos--;) { 2363 if (map->queues[pos] != index) 2364 continue; 2365 2366 if (map->len > 1) { 2367 map->queues[pos] = map->queues[--map->len]; 2368 break; 2369 } 2370 2371 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); 2372 kfree_rcu(map, rcu); 2373 return false; 2374 } 2375 2376 return true; 2377 } 2378 2379 static bool remove_xps_queue_cpu(struct net_device *dev, 2380 struct xps_dev_maps *dev_maps, 2381 int cpu, u16 offset, u16 count) 2382 { 2383 int num_tc = dev->num_tc ? : 1; 2384 bool active = false; 2385 int tci; 2386 2387 for (tci = cpu * num_tc; num_tc--; tci++) { 2388 int i, j; 2389 2390 for (i = count, j = offset; i--; j++) { 2391 if (!remove_xps_queue(dev_maps, tci, j)) 2392 break; 2393 } 2394 2395 active |= i < 0; 2396 } 2397 2398 return active; 2399 } 2400 2401 static void reset_xps_maps(struct net_device *dev, 2402 struct xps_dev_maps *dev_maps, 2403 bool is_rxqs_map) 2404 { 2405 if (is_rxqs_map) { 2406 static_key_slow_dec_cpuslocked(&xps_rxqs_needed); 2407 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); 2408 } else { 2409 RCU_INIT_POINTER(dev->xps_cpus_map, NULL); 2410 } 2411 static_key_slow_dec_cpuslocked(&xps_needed); 2412 kfree_rcu(dev_maps, rcu); 2413 } 2414 2415 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, 2416 struct xps_dev_maps *dev_maps, unsigned int nr_ids, 2417 u16 offset, u16 count, bool is_rxqs_map) 2418 { 2419 bool active = false; 2420 int i, j; 2421 2422 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids), 2423 j < nr_ids;) 2424 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, 2425 count); 2426 if (!active) 2427 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2428 2429 if (!is_rxqs_map) { 2430 for (i = offset + (count - 1); count--; i--) { 2431 netdev_queue_numa_node_write( 2432 netdev_get_tx_queue(dev, i), 2433 NUMA_NO_NODE); 2434 } 2435 } 2436 } 2437 2438 static void netif_reset_xps_queues(struct net_device *dev, u16 offset, 2439 u16 count) 2440 { 2441 const unsigned long *possible_mask = NULL; 2442 struct xps_dev_maps *dev_maps; 2443 unsigned int nr_ids; 2444 2445 if (!static_key_false(&xps_needed)) 2446 return; 2447 2448 cpus_read_lock(); 2449 mutex_lock(&xps_map_mutex); 2450 2451 if (static_key_false(&xps_rxqs_needed)) { 2452 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2453 if (dev_maps) { 2454 nr_ids = 
dev->num_rx_queues; 2455 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, 2456 offset, count, true); 2457 } 2458 } 2459 2460 dev_maps = xmap_dereference(dev->xps_cpus_map); 2461 if (!dev_maps) 2462 goto out_no_maps; 2463 2464 if (num_possible_cpus() > 1) 2465 possible_mask = cpumask_bits(cpu_possible_mask); 2466 nr_ids = nr_cpu_ids; 2467 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count, 2468 false); 2469 2470 out_no_maps: 2471 mutex_unlock(&xps_map_mutex); 2472 cpus_read_unlock(); 2473 } 2474 2475 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) 2476 { 2477 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); 2478 } 2479 2480 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, 2481 u16 index, bool is_rxqs_map) 2482 { 2483 struct xps_map *new_map; 2484 int alloc_len = XPS_MIN_MAP_ALLOC; 2485 int i, pos; 2486 2487 for (pos = 0; map && pos < map->len; pos++) { 2488 if (map->queues[pos] != index) 2489 continue; 2490 return map; 2491 } 2492 2493 /* Need to add tx-queue to this CPU's/rx-queue's existing map */ 2494 if (map) { 2495 if (pos < map->alloc_len) 2496 return map; 2497 2498 alloc_len = map->alloc_len * 2; 2499 } 2500 2501 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's 2502 * map 2503 */ 2504 if (is_rxqs_map) 2505 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); 2506 else 2507 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, 2508 cpu_to_node(attr_index)); 2509 if (!new_map) 2510 return NULL; 2511 2512 for (i = 0; i < pos; i++) 2513 new_map->queues[i] = map->queues[i]; 2514 new_map->alloc_len = alloc_len; 2515 new_map->len = pos; 2516 2517 return new_map; 2518 } 2519 2520 /* Must be called under cpus_read_lock */ 2521 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, 2522 u16 index, bool is_rxqs_map) 2523 { 2524 const unsigned long *online_mask = NULL, *possible_mask = NULL; 2525 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 2526 int i, j, tci, numa_node_id = -2; 2527 int maps_sz, num_tc = 1, tc = 0; 2528 struct xps_map *map, *new_map; 2529 bool active = false; 2530 unsigned int nr_ids; 2531 2532 if (dev->num_tc) { 2533 /* Do not allow XPS on subordinate device directly */ 2534 num_tc = dev->num_tc; 2535 if (num_tc < 0) 2536 return -EINVAL; 2537 2538 /* If queue belongs to subordinate dev use its map */ 2539 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; 2540 2541 tc = netdev_txq_to_tc(dev, index); 2542 if (tc < 0) 2543 return -EINVAL; 2544 } 2545 2546 mutex_lock(&xps_map_mutex); 2547 if (is_rxqs_map) { 2548 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); 2549 dev_maps = xmap_dereference(dev->xps_rxqs_map); 2550 nr_ids = dev->num_rx_queues; 2551 } else { 2552 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); 2553 if (num_possible_cpus() > 1) { 2554 online_mask = cpumask_bits(cpu_online_mask); 2555 possible_mask = cpumask_bits(cpu_possible_mask); 2556 } 2557 dev_maps = xmap_dereference(dev->xps_cpus_map); 2558 nr_ids = nr_cpu_ids; 2559 } 2560 2561 if (maps_sz < L1_CACHE_BYTES) 2562 maps_sz = L1_CACHE_BYTES; 2563 2564 /* allocate memory for queue storage */ 2565 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), 2566 j < nr_ids;) { 2567 if (!new_dev_maps) 2568 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); 2569 if (!new_dev_maps) { 2570 mutex_unlock(&xps_map_mutex); 2571 return -ENOMEM; 2572 } 2573 2574 tci = j * num_tc + tc; 2575 map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : 2576 NULL; 2577 2578 map = expand_xps_map(map, j, index, is_rxqs_map); 2579 if (!map) 2580 goto error; 2581 2582 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2583 } 2584 2585 if (!new_dev_maps) 2586 goto out_no_new_maps; 2587 2588 if (!dev_maps) { 2589 /* Increment static keys at most once per type */ 2590 static_key_slow_inc_cpuslocked(&xps_needed); 2591 if (is_rxqs_map) 2592 static_key_slow_inc_cpuslocked(&xps_rxqs_needed); 2593 } 2594 2595 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2596 j < nr_ids;) { 2597 /* copy maps belonging to foreign traffic classes */ 2598 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) { 2599 /* fill in the new device map from the old device map */ 2600 map = xmap_dereference(dev_maps->attr_map[tci]); 2601 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2602 } 2603 2604 /* We need to explicitly update tci as the previous loop 2605 * could break out early if dev_maps is NULL. 2606 */ 2607 tci = j * num_tc + tc; 2608 2609 if (netif_attr_test_mask(j, mask, nr_ids) && 2610 netif_attr_test_online(j, online_mask, nr_ids)) { 2611 /* add tx-queue to CPU/rx-queue maps */ 2612 int pos = 0; 2613 2614 map = xmap_dereference(new_dev_maps->attr_map[tci]); 2615 while ((pos < map->len) && (map->queues[pos] != index)) 2616 pos++; 2617 2618 if (pos == map->len) 2619 map->queues[map->len++] = index; 2620 #ifdef CONFIG_NUMA 2621 if (!is_rxqs_map) { 2622 if (numa_node_id == -2) 2623 numa_node_id = cpu_to_node(j); 2624 else if (numa_node_id != cpu_to_node(j)) 2625 numa_node_id = -1; 2626 } 2627 #endif 2628 } else if (dev_maps) { 2629 /* fill in the new device map from the old device map */ 2630 map = xmap_dereference(dev_maps->attr_map[tci]); 2631 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2632 } 2633 2634 /* copy maps belonging to foreign traffic classes */ 2635 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) { 2636 /* fill in the new device map from the old device map */ 2637 map = xmap_dereference(dev_maps->attr_map[tci]); 2638 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); 2639 } 2640 } 2641 2642 if (is_rxqs_map) 2643 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps); 2644 else 2645 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps); 2646 2647 /* Cleanup old maps */ 2648 if (!dev_maps) 2649 goto out_no_old_maps; 2650 2651 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2652 j < nr_ids;) { 2653 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2654 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2655 map = xmap_dereference(dev_maps->attr_map[tci]); 2656 if (map && map != new_map) 2657 kfree_rcu(map, rcu); 2658 } 2659 } 2660 2661 kfree_rcu(dev_maps, rcu); 2662 2663 out_no_old_maps: 2664 dev_maps = new_dev_maps; 2665 active = true; 2666 2667 out_no_new_maps: 2668 if (!is_rxqs_map) { 2669 /* update Tx queue numa node */ 2670 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), 2671 (numa_node_id >= 0) ? 
2672 numa_node_id : NUMA_NO_NODE); 2673 } 2674 2675 if (!dev_maps) 2676 goto out_no_maps; 2677 2678 /* removes tx-queue from unused CPUs/rx-queues */ 2679 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2680 j < nr_ids;) { 2681 for (i = tc, tci = j * num_tc; i--; tci++) 2682 active |= remove_xps_queue(dev_maps, tci, index); 2683 if (!netif_attr_test_mask(j, mask, nr_ids) || 2684 !netif_attr_test_online(j, online_mask, nr_ids)) 2685 active |= remove_xps_queue(dev_maps, tci, index); 2686 for (i = num_tc - tc, tci++; --i; tci++) 2687 active |= remove_xps_queue(dev_maps, tci, index); 2688 } 2689 2690 /* free map if not active */ 2691 if (!active) 2692 reset_xps_maps(dev, dev_maps, is_rxqs_map); 2693 2694 out_no_maps: 2695 mutex_unlock(&xps_map_mutex); 2696 2697 return 0; 2698 error: 2699 /* remove any maps that we added */ 2700 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), 2701 j < nr_ids;) { 2702 for (i = num_tc, tci = j * num_tc; i--; tci++) { 2703 new_map = xmap_dereference(new_dev_maps->attr_map[tci]); 2704 map = dev_maps ? 2705 xmap_dereference(dev_maps->attr_map[tci]) : 2706 NULL; 2707 if (new_map && new_map != map) 2708 kfree(new_map); 2709 } 2710 } 2711 2712 mutex_unlock(&xps_map_mutex); 2713 2714 kfree(new_dev_maps); 2715 return -ENOMEM; 2716 } 2717 EXPORT_SYMBOL_GPL(__netif_set_xps_queue); 2718 2719 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2720 u16 index) 2721 { 2722 int ret; 2723 2724 cpus_read_lock(); 2725 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false); 2726 cpus_read_unlock(); 2727 2728 return ret; 2729 } 2730 EXPORT_SYMBOL(netif_set_xps_queue); 2731 2732 #endif 2733 static void netdev_unbind_all_sb_channels(struct net_device *dev) 2734 { 2735 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2736 2737 /* Unbind any subordinate channels */ 2738 while (txq-- != &dev->_tx[0]) { 2739 if (txq->sb_dev) 2740 netdev_unbind_sb_channel(dev, txq->sb_dev); 2741 } 2742 } 2743 2744 void netdev_reset_tc(struct net_device *dev) 2745 { 2746 #ifdef CONFIG_XPS 2747 netif_reset_xps_queues_gt(dev, 0); 2748 #endif 2749 netdev_unbind_all_sb_channels(dev); 2750 2751 /* Reset TC configuration of device */ 2752 dev->num_tc = 0; 2753 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 2754 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 2755 } 2756 EXPORT_SYMBOL(netdev_reset_tc); 2757 2758 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 2759 { 2760 if (tc >= dev->num_tc) 2761 return -EINVAL; 2762 2763 #ifdef CONFIG_XPS 2764 netif_reset_xps_queues(dev, offset, count); 2765 #endif 2766 dev->tc_to_txq[tc].count = count; 2767 dev->tc_to_txq[tc].offset = offset; 2768 return 0; 2769 } 2770 EXPORT_SYMBOL(netdev_set_tc_queue); 2771 2772 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 2773 { 2774 if (num_tc > TC_MAX_QUEUE) 2775 return -EINVAL; 2776 2777 #ifdef CONFIG_XPS 2778 netif_reset_xps_queues_gt(dev, 0); 2779 #endif 2780 netdev_unbind_all_sb_channels(dev); 2781 2782 dev->num_tc = num_tc; 2783 return 0; 2784 } 2785 EXPORT_SYMBOL(netdev_set_num_tc); 2786 2787 void netdev_unbind_sb_channel(struct net_device *dev, 2788 struct net_device *sb_dev) 2789 { 2790 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; 2791 2792 #ifdef CONFIG_XPS 2793 netif_reset_xps_queues_gt(sb_dev, 0); 2794 #endif 2795 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); 2796 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); 2797 2798 while (txq-- != &dev->_tx[0]) { 2799 if 
(txq->sb_dev == sb_dev) 2800 txq->sb_dev = NULL; 2801 } 2802 } 2803 EXPORT_SYMBOL(netdev_unbind_sb_channel); 2804 2805 int netdev_bind_sb_channel_queue(struct net_device *dev, 2806 struct net_device *sb_dev, 2807 u8 tc, u16 count, u16 offset) 2808 { 2809 /* Make certain the sb_dev and dev are already configured */ 2810 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) 2811 return -EINVAL; 2812 2813 /* We cannot hand out queues we don't have */ 2814 if ((offset + count) > dev->real_num_tx_queues) 2815 return -EINVAL; 2816 2817 /* Record the mapping */ 2818 sb_dev->tc_to_txq[tc].count = count; 2819 sb_dev->tc_to_txq[tc].offset = offset; 2820 2821 /* Provide a way for Tx queue to find the tc_to_txq map or 2822 * XPS map for itself. 2823 */ 2824 while (count--) 2825 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; 2826 2827 return 0; 2828 } 2829 EXPORT_SYMBOL(netdev_bind_sb_channel_queue); 2830 2831 int netdev_set_sb_channel(struct net_device *dev, u16 channel) 2832 { 2833 /* Do not use a multiqueue device to represent a subordinate channel */ 2834 if (netif_is_multiqueue(dev)) 2835 return -ENODEV; 2836 2837 /* We allow channels 1 - 32767 to be used for subordinate channels. 2838 * Channel 0 is meant to be "native" mode and used only to represent 2839 * the main root device. We allow writing 0 to reset the device back 2840 * to normal mode after being used as a subordinate channel. 2841 */ 2842 if (channel > S16_MAX) 2843 return -EINVAL; 2844 2845 dev->num_tc = -channel; 2846 2847 return 0; 2848 } 2849 EXPORT_SYMBOL(netdev_set_sb_channel); 2850 2851 /* 2852 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues 2853 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed. 2854 */ 2855 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2856 { 2857 bool disabling; 2858 int rc; 2859 2860 disabling = txq < dev->real_num_tx_queues; 2861 2862 if (txq < 1 || txq > dev->num_tx_queues) 2863 return -EINVAL; 2864 2865 if (dev->reg_state == NETREG_REGISTERED || 2866 dev->reg_state == NETREG_UNREGISTERING) { 2867 ASSERT_RTNL(); 2868 2869 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2870 txq); 2871 if (rc) 2872 return rc; 2873 2874 if (dev->num_tc) 2875 netif_setup_tc(dev, txq); 2876 2877 dev->real_num_tx_queues = txq; 2878 2879 if (disabling) { 2880 synchronize_net(); 2881 qdisc_reset_all_tx_gt(dev, txq); 2882 #ifdef CONFIG_XPS 2883 netif_reset_xps_queues_gt(dev, txq); 2884 #endif 2885 } 2886 } else { 2887 dev->real_num_tx_queues = txq; 2888 } 2889 2890 return 0; 2891 } 2892 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2893 2894 #ifdef CONFIG_SYSFS 2895 /** 2896 * netif_set_real_num_rx_queues - set actual number of RX queues used 2897 * @dev: Network device 2898 * @rxq: Actual number of RX queues 2899 * 2900 * This must be called either with the rtnl_lock held or before 2901 * registration of the net device. Returns 0 on success, or a 2902 * negative error code. If called before registration, it always 2903 * succeeds.
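 *
 * A typical reconfiguration sequence from a driver (a sketch only;
 * new_rx and new_tx are hypothetical values computed by the driver):
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_rx_queues(dev, new_rx);
 *	if (!err)
 *		err = netif_set_real_num_tx_queues(dev, new_tx);
 *	rtnl_unlock();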
2904 */ 2905 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2906 { 2907 int rc; 2908 2909 if (rxq < 1 || rxq > dev->num_rx_queues) 2910 return -EINVAL; 2911 2912 if (dev->reg_state == NETREG_REGISTERED) { 2913 ASSERT_RTNL(); 2914 2915 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2916 rxq); 2917 if (rc) 2918 return rc; 2919 } 2920 2921 dev->real_num_rx_queues = rxq; 2922 return 0; 2923 } 2924 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2925 #endif 2926 2927 /** 2928 * netif_get_num_default_rss_queues - default number of RSS queues 2929 * 2930 * This routine should set an upper limit on the number of RSS queues 2931 * used by default by multiqueue devices. 2932 */ 2933 int netif_get_num_default_rss_queues(void) 2934 { 2935 return is_kdump_kernel() ? 2936 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2937 } 2938 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2939 2940 static void __netif_reschedule(struct Qdisc *q) 2941 { 2942 struct softnet_data *sd; 2943 unsigned long flags; 2944 2945 local_irq_save(flags); 2946 sd = this_cpu_ptr(&softnet_data); 2947 q->next_sched = NULL; 2948 *sd->output_queue_tailp = q; 2949 sd->output_queue_tailp = &q->next_sched; 2950 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2951 local_irq_restore(flags); 2952 } 2953 2954 void __netif_schedule(struct Qdisc *q) 2955 { 2956 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2957 __netif_reschedule(q); 2958 } 2959 EXPORT_SYMBOL(__netif_schedule); 2960 2961 struct dev_kfree_skb_cb { 2962 enum skb_free_reason reason; 2963 }; 2964 2965 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 2966 { 2967 return (struct dev_kfree_skb_cb *)skb->cb; 2968 } 2969 2970 void netif_schedule_queue(struct netdev_queue *txq) 2971 { 2972 rcu_read_lock(); 2973 if (!netif_xmit_stopped(txq)) { 2974 struct Qdisc *q = rcu_dereference(txq->qdisc); 2975 2976 __netif_schedule(q); 2977 } 2978 rcu_read_unlock(); 2979 } 2980 EXPORT_SYMBOL(netif_schedule_queue); 2981 2982 void netif_tx_wake_queue(struct netdev_queue *dev_queue) 2983 { 2984 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { 2985 struct Qdisc *q; 2986 2987 rcu_read_lock(); 2988 q = rcu_dereference(dev_queue->qdisc); 2989 __netif_schedule(q); 2990 rcu_read_unlock(); 2991 } 2992 } 2993 EXPORT_SYMBOL(netif_tx_wake_queue); 2994 2995 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 2996 { 2997 unsigned long flags; 2998 2999 if (unlikely(!skb)) 3000 return; 3001 3002 if (likely(refcount_read(&skb->users) == 1)) { 3003 smp_rmb(); 3004 refcount_set(&skb->users, 0); 3005 } else if (likely(!refcount_dec_and_test(&skb->users))) { 3006 return; 3007 } 3008 get_kfree_skb_cb(skb)->reason = reason; 3009 local_irq_save(flags); 3010 skb->next = __this_cpu_read(softnet_data.completion_queue); 3011 __this_cpu_write(softnet_data.completion_queue, skb); 3012 raise_softirq_irqoff(NET_TX_SOFTIRQ); 3013 local_irq_restore(flags); 3014 } 3015 EXPORT_SYMBOL(__dev_kfree_skb_irq); 3016 3017 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 3018 { 3019 if (in_irq() || irqs_disabled()) 3020 __dev_kfree_skb_irq(skb, reason); 3021 else 3022 dev_kfree_skb(skb); 3023 } 3024 EXPORT_SYMBOL(__dev_kfree_skb_any); 3025 3026 3027 /** 3028 * netif_device_detach - mark device as removed 3029 * @dev: network device 3030 * 3031 * Mark device as removed from system and therefore no longer available. 
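 *
 * Commonly paired with netif_device_attach() across suspend/resume
 * (a sketch; my_suspend() and my_hw_quiesce() are hypothetical):
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return my_hw_quiesce(dev);
 *	}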
3032 */ 3033 void netif_device_detach(struct net_device *dev) 3034 { 3035 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 3036 netif_running(dev)) { 3037 netif_tx_stop_all_queues(dev); 3038 } 3039 } 3040 EXPORT_SYMBOL(netif_device_detach); 3041 3042 /** 3043 * netif_device_attach - mark device as attached 3044 * @dev: network device 3045 * 3046 * Mark device as attached to the system and restart if needed. 3047 */ 3048 void netif_device_attach(struct net_device *dev) 3049 { 3050 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 3051 netif_running(dev)) { 3052 netif_tx_wake_all_queues(dev); 3053 __netdev_watchdog_up(dev); 3054 } 3055 } 3056 EXPORT_SYMBOL(netif_device_attach); 3057 3058 /* 3059 * Returns a Tx hash based on the given packet descriptor and a Tx queues' 3060 * number to be used as a distribution range. 3061 */ 3062 static u16 skb_tx_hash(const struct net_device *dev, 3063 const struct net_device *sb_dev, 3064 struct sk_buff *skb) 3065 { 3066 u32 hash; 3067 u16 qoffset = 0; 3068 u16 qcount = dev->real_num_tx_queues; 3069 3070 if (dev->num_tc) { 3071 u8 tc = netdev_get_prio_tc_map(dev, skb->priority); 3072 3073 qoffset = sb_dev->tc_to_txq[tc].offset; 3074 qcount = sb_dev->tc_to_txq[tc].count; 3075 } 3076 3077 if (skb_rx_queue_recorded(skb)) { 3078 hash = skb_get_rx_queue(skb); 3079 while (unlikely(hash >= qcount)) 3080 hash -= qcount; 3081 return hash + qoffset; 3082 } 3083 3084 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 3085 } 3086 3087 static void skb_warn_bad_offload(const struct sk_buff *skb) 3088 { 3089 static const netdev_features_t null_features; 3090 struct net_device *dev = skb->dev; 3091 const char *name = ""; 3092 3093 if (!net_ratelimit()) 3094 return; 3095 3096 if (dev) { 3097 if (dev->dev.parent) 3098 name = dev_driver_string(dev->dev.parent); 3099 else 3100 name = netdev_name(dev); 3101 } 3102 skb_dump(KERN_WARNING, skb, false); 3103 WARN(1, "%s: caps=(%pNF, %pNF)\n", 3104 name, dev ? &dev->features : &null_features, 3105 skb->sk ? &skb->sk->sk_route_caps : &null_features); 3106 } 3107 3108 /* 3109 * Invalidate hardware checksum when packet is to be mangled, and 3110 * complete checksum manually on outgoing path. 3111 */ 3112 int skb_checksum_help(struct sk_buff *skb) 3113 { 3114 __wsum csum; 3115 int ret = 0, offset; 3116 3117 if (skb->ip_summed == CHECKSUM_COMPLETE) 3118 goto out_set_summed; 3119 3120 if (unlikely(skb_shinfo(skb)->gso_size)) { 3121 skb_warn_bad_offload(skb); 3122 return -EINVAL; 3123 } 3124 3125 /* Before computing a checksum, we should make sure no frag could 3126 * be modified by an external entity : checksum could be wrong.
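 * (A frag shared with e.g. a zerocopy sender may still be written to by
 * its owner; __skb_linearize() below gives us a private copy to checksum.)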
3127 */ 3128 if (skb_has_shared_frag(skb)) { 3129 ret = __skb_linearize(skb); 3130 if (ret) 3131 goto out; 3132 } 3133 3134 offset = skb_checksum_start_offset(skb); 3135 BUG_ON(offset >= skb_headlen(skb)); 3136 csum = skb_checksum(skb, offset, skb->len - offset, 0); 3137 3138 offset += skb->csum_offset; 3139 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 3140 3141 ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); 3142 if (ret) 3143 goto out; 3144 3145 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; 3146 out_set_summed: 3147 skb->ip_summed = CHECKSUM_NONE; 3148 out: 3149 return ret; 3150 } 3151 EXPORT_SYMBOL(skb_checksum_help); 3152 3153 int skb_crc32c_csum_help(struct sk_buff *skb) 3154 { 3155 __le32 crc32c_csum; 3156 int ret = 0, offset, start; 3157 3158 if (skb->ip_summed != CHECKSUM_PARTIAL) 3159 goto out; 3160 3161 if (unlikely(skb_is_gso(skb))) 3162 goto out; 3163 3164 /* Before computing a checksum, we should make sure no frag could 3165 * be modified by an external entity : checksum could be wrong. 3166 */ 3167 if (unlikely(skb_has_shared_frag(skb))) { 3168 ret = __skb_linearize(skb); 3169 if (ret) 3170 goto out; 3171 } 3172 start = skb_checksum_start_offset(skb); 3173 offset = start + offsetof(struct sctphdr, checksum); 3174 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { 3175 ret = -EINVAL; 3176 goto out; 3177 } 3178 3179 ret = skb_ensure_writable(skb, offset + sizeof(__le32)); 3180 if (ret) 3181 goto out; 3182 3183 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, 3184 skb->len - start, ~(__u32)0, 3185 crc32c_csum_stub)); 3186 *(__le32 *)(skb->data + offset) = crc32c_csum; 3187 skb->ip_summed = CHECKSUM_NONE; 3188 skb->csum_not_inet = 0; 3189 out: 3190 return ret; 3191 } 3192 3193 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 3194 { 3195 __be16 type = skb->protocol; 3196 3197 /* Tunnel gso handlers can set protocol to ethernet. */ 3198 if (type == htons(ETH_P_TEB)) { 3199 struct ethhdr *eth; 3200 3201 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 3202 return 0; 3203 3204 eth = (struct ethhdr *)skb->data; 3205 type = eth->h_proto; 3206 } 3207 3208 return __vlan_get_protocol(skb, type, depth); 3209 } 3210 3211 /** 3212 * skb_mac_gso_segment - mac layer segmentation handler. 3213 * @skb: buffer to segment 3214 * @features: features for the output path (see dev->features) 3215 */ 3216 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 3217 netdev_features_t features) 3218 { 3219 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 3220 struct packet_offload *ptype; 3221 int vlan_depth = skb->mac_len; 3222 __be16 type = skb_network_protocol(skb, &vlan_depth); 3223 3224 if (unlikely(!type)) 3225 return ERR_PTR(-EINVAL); 3226 3227 __skb_pull(skb, vlan_depth); 3228 3229 rcu_read_lock(); 3230 list_for_each_entry_rcu(ptype, &offload_base, list) { 3231 if (ptype->type == type && ptype->callbacks.gso_segment) { 3232 segs = ptype->callbacks.gso_segment(skb, features); 3233 break; 3234 } 3235 } 3236 rcu_read_unlock(); 3237 3238 __skb_push(skb, skb->data - skb_mac_header(skb)); 3239 3240 return segs; 3241 } 3242 EXPORT_SYMBOL(skb_mac_gso_segment); 3243 3244 3245 /* openvswitch calls this on rx path, so we need a different check. 
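 * On the tx path, help is needed unless the checksum is already
 * CHECKSUM_PARTIAL or CHECKSUM_UNNECESSARY; on the rx path, only
 * CHECKSUM_NONE packets still need their checksum completed.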
3246 */ 3247 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 3248 { 3249 if (tx_path) 3250 return skb->ip_summed != CHECKSUM_PARTIAL && 3251 skb->ip_summed != CHECKSUM_UNNECESSARY; 3252 3253 return skb->ip_summed == CHECKSUM_NONE; 3254 } 3255 3256 /** 3257 * __skb_gso_segment - Perform segmentation on skb. 3258 * @skb: buffer to segment 3259 * @features: features for the output path (see dev->features) 3260 * @tx_path: whether it is called in TX path 3261 * 3262 * This function segments the given skb and returns a list of segments. 3263 * 3264 * It may return NULL if the skb requires no segmentation. This is 3265 * only possible when GSO is used for verifying header integrity. 3266 * 3267 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. 3268 */ 3269 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3270 netdev_features_t features, bool tx_path) 3271 { 3272 struct sk_buff *segs; 3273 3274 if (unlikely(skb_needs_check(skb, tx_path))) { 3275 int err; 3276 3277 /* We're going to init ->check field in TCP or UDP header */ 3278 err = skb_cow_head(skb, 0); 3279 if (err < 0) 3280 return ERR_PTR(err); 3281 } 3282 3283 /* Only report GSO partial support if it will enable us to 3284 * support segmentation on this frame without needing additional 3285 * work. 3286 */ 3287 if (features & NETIF_F_GSO_PARTIAL) { 3288 netdev_features_t partial_features = NETIF_F_GSO_ROBUST; 3289 struct net_device *dev = skb->dev; 3290 3291 partial_features |= dev->features & dev->gso_partial_features; 3292 if (!skb_gso_ok(skb, features | partial_features)) 3293 features &= ~NETIF_F_GSO_PARTIAL; 3294 } 3295 3296 BUILD_BUG_ON(SKB_SGO_CB_OFFSET + 3297 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); 3298 3299 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 3300 SKB_GSO_CB(skb)->encap_level = 0; 3301 3302 skb_reset_mac_header(skb); 3303 skb_reset_mac_len(skb); 3304 3305 segs = skb_mac_gso_segment(skb, features); 3306 3307 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) 3308 skb_warn_bad_offload(skb); 3309 3310 return segs; 3311 } 3312 EXPORT_SYMBOL(__skb_gso_segment); 3313 3314 /* Take action when hardware reception checksum errors are detected. */ 3315 #ifdef CONFIG_BUG 3316 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) 3317 { 3318 if (net_ratelimit()) { 3319 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 3320 skb_dump(KERN_ERR, skb, true); 3321 dump_stack(); 3322 } 3323 } 3324 EXPORT_SYMBOL(netdev_rx_csum_fault); 3325 #endif 3326 3327 /* XXX: check that highmem exists at all on the given machine. */ 3328 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 3329 { 3330 #ifdef CONFIG_HIGHMEM 3331 int i; 3332 3333 if (!(dev->features & NETIF_F_HIGHDMA)) { 3334 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3335 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3336 3337 if (PageHighMem(skb_frag_page(frag))) 3338 return 1; 3339 } 3340 } 3341 #endif 3342 return 0; 3343 } 3344 3345 /* If MPLS offload request, verify we are testing hardware MPLS features 3346 * instead of standard features for the netdev. 
3347 */ 3348 #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 3349 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3350 netdev_features_t features, 3351 __be16 type) 3352 { 3353 if (eth_p_mpls(type)) 3354 features &= skb->dev->mpls_features; 3355 3356 return features; 3357 } 3358 #else 3359 static netdev_features_t net_mpls_features(struct sk_buff *skb, 3360 netdev_features_t features, 3361 __be16 type) 3362 { 3363 return features; 3364 } 3365 #endif 3366 3367 static netdev_features_t harmonize_features(struct sk_buff *skb, 3368 netdev_features_t features) 3369 { 3370 int tmp; 3371 __be16 type; 3372 3373 type = skb_network_protocol(skb, &tmp); 3374 features = net_mpls_features(skb, features, type); 3375 3376 if (skb->ip_summed != CHECKSUM_NONE && 3377 !can_checksum_protocol(features, type)) { 3378 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3379 } 3380 if (illegal_highdma(skb->dev, skb)) 3381 features &= ~NETIF_F_SG; 3382 3383 return features; 3384 } 3385 3386 netdev_features_t passthru_features_check(struct sk_buff *skb, 3387 struct net_device *dev, 3388 netdev_features_t features) 3389 { 3390 return features; 3391 } 3392 EXPORT_SYMBOL(passthru_features_check); 3393 3394 static netdev_features_t dflt_features_check(struct sk_buff *skb, 3395 struct net_device *dev, 3396 netdev_features_t features) 3397 { 3398 return vlan_features_check(skb, features); 3399 } 3400 3401 static netdev_features_t gso_features_check(const struct sk_buff *skb, 3402 struct net_device *dev, 3403 netdev_features_t features) 3404 { 3405 u16 gso_segs = skb_shinfo(skb)->gso_segs; 3406 3407 if (gso_segs > dev->gso_max_segs) 3408 return features & ~NETIF_F_GSO_MASK; 3409 3410 /* Support for GSO partial features requires software 3411 * intervention before we can actually process the packets 3412 * so we need to strip support for any partial features now 3413 * and we can pull them back in after we have partially 3414 * segmented the frame. 3415 */ 3416 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) 3417 features &= ~dev->gso_partial_features; 3418 3419 /* Make sure to clear the IPv4 ID mangling feature if the 3420 * IPv4 header has the potential to be fragmented. 3421 */ 3422 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 3423 struct iphdr *iph = skb->encapsulation ? 
3424 inner_ip_hdr(skb) : ip_hdr(skb); 3425 3426 if (!(iph->frag_off & htons(IP_DF))) 3427 features &= ~NETIF_F_TSO_MANGLEID; 3428 } 3429 3430 return features; 3431 } 3432 3433 netdev_features_t netif_skb_features(struct sk_buff *skb) 3434 { 3435 struct net_device *dev = skb->dev; 3436 netdev_features_t features = dev->features; 3437 3438 if (skb_is_gso(skb)) 3439 features = gso_features_check(skb, dev, features); 3440 3441 /* If encapsulation offload request, verify we are testing 3442 * hardware encapsulation features instead of standard 3443 * features for the netdev 3444 */ 3445 if (skb->encapsulation) 3446 features &= dev->hw_enc_features; 3447 3448 if (skb_vlan_tagged(skb)) 3449 features = netdev_intersect_features(features, 3450 dev->vlan_features | 3451 NETIF_F_HW_VLAN_CTAG_TX | 3452 NETIF_F_HW_VLAN_STAG_TX); 3453 3454 if (dev->netdev_ops->ndo_features_check) 3455 features &= dev->netdev_ops->ndo_features_check(skb, dev, 3456 features); 3457 else 3458 features &= dflt_features_check(skb, dev, features); 3459 3460 return harmonize_features(skb, features); 3461 } 3462 EXPORT_SYMBOL(netif_skb_features); 3463 3464 static int xmit_one(struct sk_buff *skb, struct net_device *dev, 3465 struct netdev_queue *txq, bool more) 3466 { 3467 unsigned int len; 3468 int rc; 3469 3470 if (dev_nit_active(dev)) 3471 dev_queue_xmit_nit(skb, dev); 3472 3473 len = skb->len; 3474 trace_net_dev_start_xmit(skb, dev); 3475 rc = netdev_start_xmit(skb, dev, txq, more); 3476 trace_net_dev_xmit(skb, rc, dev, len); 3477 3478 return rc; 3479 } 3480 3481 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, 3482 struct netdev_queue *txq, int *ret) 3483 { 3484 struct sk_buff *skb = first; 3485 int rc = NETDEV_TX_OK; 3486 3487 while (skb) { 3488 struct sk_buff *next = skb->next; 3489 3490 skb_mark_not_on_list(skb); 3491 rc = xmit_one(skb, dev, txq, next != NULL); 3492 if (unlikely(!dev_xmit_complete(rc))) { 3493 skb->next = next; 3494 goto out; 3495 } 3496 3497 skb = next; 3498 if (netif_tx_queue_stopped(txq) && skb) { 3499 rc = NETDEV_TX_BUSY; 3500 break; 3501 } 3502 } 3503 3504 out: 3505 *ret = rc; 3506 return skb; 3507 } 3508 3509 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, 3510 netdev_features_t features) 3511 { 3512 if (skb_vlan_tag_present(skb) && 3513 !vlan_hw_offload_capable(features, skb->vlan_proto)) 3514 skb = __vlan_hwaccel_push_inside(skb); 3515 return skb; 3516 } 3517 3518 int skb_csum_hwoffload_help(struct sk_buff *skb, 3519 const netdev_features_t features) 3520 { 3521 if (unlikely(skb->csum_not_inet)) 3522 return !!(features & NETIF_F_SCTP_CRC) ? 0 : 3523 skb_crc32c_csum_help(skb); 3524 3525 return !!(features & NETIF_F_CSUM_MASK) ? 
0 : skb_checksum_help(skb); 3526 } 3527 EXPORT_SYMBOL(skb_csum_hwoffload_help); 3528 3529 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) 3530 { 3531 netdev_features_t features; 3532 3533 features = netif_skb_features(skb); 3534 skb = validate_xmit_vlan(skb, features); 3535 if (unlikely(!skb)) 3536 goto out_null; 3537 3538 skb = sk_validate_xmit_skb(skb, dev); 3539 if (unlikely(!skb)) 3540 goto out_null; 3541 3542 if (netif_needs_gso(skb, features)) { 3543 struct sk_buff *segs; 3544 3545 segs = skb_gso_segment(skb, features); 3546 if (IS_ERR(segs)) { 3547 goto out_kfree_skb; 3548 } else if (segs) { 3549 consume_skb(skb); 3550 skb = segs; 3551 } 3552 } else { 3553 if (skb_needs_linearize(skb, features) && 3554 __skb_linearize(skb)) 3555 goto out_kfree_skb; 3556 3557 /* If packet is not checksummed and device does not 3558 * support checksumming for this protocol, complete 3559 * checksumming here. 3560 */ 3561 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3562 if (skb->encapsulation) 3563 skb_set_inner_transport_header(skb, 3564 skb_checksum_start_offset(skb)); 3565 else 3566 skb_set_transport_header(skb, 3567 skb_checksum_start_offset(skb)); 3568 if (skb_csum_hwoffload_help(skb, features)) 3569 goto out_kfree_skb; 3570 } 3571 } 3572 3573 skb = validate_xmit_xfrm(skb, features, again); 3574 3575 return skb; 3576 3577 out_kfree_skb: 3578 kfree_skb(skb); 3579 out_null: 3580 atomic_long_inc(&dev->tx_dropped); 3581 return NULL; 3582 } 3583 3584 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) 3585 { 3586 struct sk_buff *next, *head = NULL, *tail; 3587 3588 for (; skb != NULL; skb = next) { 3589 next = skb->next; 3590 skb_mark_not_on_list(skb); 3591 3592 /* in case skb won't be segmented, point to itself */ 3593 skb->prev = skb; 3594 3595 skb = validate_xmit_skb(skb, dev, again); 3596 if (!skb) 3597 continue; 3598 3599 if (!head) 3600 head = skb; 3601 else 3602 tail->next = skb; 3603 /* If skb was segmented, skb->prev points to 3604 * the last segment. If not, it still contains skb. 
3605 */ 3606 tail = skb->prev; 3607 } 3608 return head; 3609 } 3610 EXPORT_SYMBOL_GPL(validate_xmit_skb_list); 3611 3612 static void qdisc_pkt_len_init(struct sk_buff *skb) 3613 { 3614 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3615 3616 qdisc_skb_cb(skb)->pkt_len = skb->len; 3617 3618 /* To get more precise estimation of bytes sent on wire, 3619 * we add to pkt_len the headers size of all segments 3620 */ 3621 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { 3622 unsigned int hdr_len; 3623 u16 gso_segs = shinfo->gso_segs; 3624 3625 /* mac layer + network layer */ 3626 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3627 3628 /* + transport layer */ 3629 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 3630 const struct tcphdr *th; 3631 struct tcphdr _tcphdr; 3632 3633 th = skb_header_pointer(skb, skb_transport_offset(skb), 3634 sizeof(_tcphdr), &_tcphdr); 3635 if (likely(th)) 3636 hdr_len += __tcp_hdrlen(th); 3637 } else { 3638 struct udphdr _udphdr; 3639 3640 if (skb_header_pointer(skb, skb_transport_offset(skb), 3641 sizeof(_udphdr), &_udphdr)) 3642 hdr_len += sizeof(struct udphdr); 3643 } 3644 3645 if (shinfo->gso_type & SKB_GSO_DODGY) 3646 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3647 shinfo->gso_size); 3648 3649 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 3650 } 3651 } 3652 3653 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 3654 struct net_device *dev, 3655 struct netdev_queue *txq) 3656 { 3657 spinlock_t *root_lock = qdisc_lock(q); 3658 struct sk_buff *to_free = NULL; 3659 bool contended; 3660 int rc; 3661 3662 qdisc_calculate_pkt_len(skb, q); 3663 3664 if (q->flags & TCQ_F_NOLOCK) { 3665 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3666 qdisc_run(q); 3667 3668 if (unlikely(to_free)) 3669 kfree_skb_list(to_free); 3670 return rc; 3671 } 3672 3673 /* 3674 * Heuristic to force contended enqueues to serialize on a 3675 * separate lock before trying to get qdisc main lock. 3676 * This permits qdisc->running owner to get the lock more 3677 * often and dequeue packets faster. 3678 */ 3679 contended = qdisc_is_running(q); 3680 if (unlikely(contended)) 3681 spin_lock(&q->busylock); 3682 3683 spin_lock(root_lock); 3684 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 3685 __qdisc_drop(skb, &to_free); 3686 rc = NET_XMIT_DROP; 3687 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 3688 qdisc_run_begin(q)) { 3689 /* 3690 * This is a work-conserving queue; there are no old skbs 3691 * waiting to be sent out; and the qdisc is not running - 3692 * xmit the skb directly. 
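 *
 * (This bypass is only reachable for qdiscs flagged TCQ_F_CAN_BYPASS,
 * such as pfifo_fast; when sch_direct_xmit() reports that more may be
 * sent, __qdisc_run() below drains anything enqueued meanwhile.)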
3693 */ 3694 3695 qdisc_bstats_update(q, skb); 3696 3697 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { 3698 if (unlikely(contended)) { 3699 spin_unlock(&q->busylock); 3700 contended = false; 3701 } 3702 __qdisc_run(q); 3703 } 3704 3705 qdisc_run_end(q); 3706 rc = NET_XMIT_SUCCESS; 3707 } else { 3708 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; 3709 if (qdisc_run_begin(q)) { 3710 if (unlikely(contended)) { 3711 spin_unlock(&q->busylock); 3712 contended = false; 3713 } 3714 __qdisc_run(q); 3715 qdisc_run_end(q); 3716 } 3717 } 3718 spin_unlock(root_lock); 3719 if (unlikely(to_free)) 3720 kfree_skb_list(to_free); 3721 if (unlikely(contended)) 3722 spin_unlock(&q->busylock); 3723 return rc; 3724 } 3725 3726 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 3727 static void skb_update_prio(struct sk_buff *skb) 3728 { 3729 const struct netprio_map *map; 3730 const struct sock *sk; 3731 unsigned int prioidx; 3732 3733 if (skb->priority) 3734 return; 3735 map = rcu_dereference_bh(skb->dev->priomap); 3736 if (!map) 3737 return; 3738 sk = skb_to_full_sk(skb); 3739 if (!sk) 3740 return; 3741 3742 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); 3743 3744 if (prioidx < map->priomap_len) 3745 skb->priority = map->priomap[prioidx]; 3746 } 3747 #else 3748 #define skb_update_prio(skb) 3749 #endif 3750 3751 /** 3752 * dev_loopback_xmit - loop back @skb 3753 * @net: network namespace this loopback is happening in 3754 * @sk: sk needed to be a netfilter okfn 3755 * @skb: buffer to transmit 3756 */ 3757 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 3758 { 3759 skb_reset_mac_header(skb); 3760 __skb_pull(skb, skb_network_offset(skb)); 3761 skb->pkt_type = PACKET_LOOPBACK; 3762 skb->ip_summed = CHECKSUM_UNNECESSARY; 3763 WARN_ON(!skb_dst(skb)); 3764 skb_dst_force(skb); 3765 netif_rx_ni(skb); 3766 return 0; 3767 } 3768 EXPORT_SYMBOL(dev_loopback_xmit); 3769 3770 #ifdef CONFIG_NET_EGRESS 3771 static struct sk_buff * 3772 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) 3773 { 3774 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); 3775 struct tcf_result cl_res; 3776 3777 if (!miniq) 3778 return skb; 3779 3780 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ 3781 mini_qdisc_bstats_cpu_update(miniq, skb); 3782 3783 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 3784 case TC_ACT_OK: 3785 case TC_ACT_RECLASSIFY: 3786 skb->tc_index = TC_H_MIN(cl_res.classid); 3787 break; 3788 case TC_ACT_SHOT: 3789 mini_qdisc_qstats_cpu_drop(miniq); 3790 *ret = NET_XMIT_DROP; 3791 kfree_skb(skb); 3792 return NULL; 3793 case TC_ACT_STOLEN: 3794 case TC_ACT_QUEUED: 3795 case TC_ACT_TRAP: 3796 *ret = NET_XMIT_SUCCESS; 3797 consume_skb(skb); 3798 return NULL; 3799 case TC_ACT_REDIRECT: 3800 /* No need to push/pop skb's mac_header here on egress! 
*/ 3801 skb_do_redirect(skb); 3802 *ret = NET_XMIT_SUCCESS; 3803 return NULL; 3804 default: 3805 break; 3806 } 3807 3808 return skb; 3809 } 3810 #endif /* CONFIG_NET_EGRESS */ 3811 3812 #ifdef CONFIG_XPS 3813 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb, 3814 struct xps_dev_maps *dev_maps, unsigned int tci) 3815 { 3816 struct xps_map *map; 3817 int queue_index = -1; 3818 3819 if (dev->num_tc) { 3820 tci *= dev->num_tc; 3821 tci += netdev_get_prio_tc_map(dev, skb->priority); 3822 } 3823 3824 map = rcu_dereference(dev_maps->attr_map[tci]); 3825 if (map) { 3826 if (map->len == 1) 3827 queue_index = map->queues[0]; 3828 else 3829 queue_index = map->queues[reciprocal_scale( 3830 skb_get_hash(skb), map->len)]; 3831 if (unlikely(queue_index >= dev->real_num_tx_queues)) 3832 queue_index = -1; 3833 } 3834 return queue_index; 3835 } 3836 #endif 3837 3838 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev, 3839 struct sk_buff *skb) 3840 { 3841 #ifdef CONFIG_XPS 3842 struct xps_dev_maps *dev_maps; 3843 struct sock *sk = skb->sk; 3844 int queue_index = -1; 3845 3846 if (!static_key_false(&xps_needed)) 3847 return -1; 3848 3849 rcu_read_lock(); 3850 if (!static_key_false(&xps_rxqs_needed)) 3851 goto get_cpus_map; 3852 3853 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map); 3854 if (dev_maps) { 3855 int tci = sk_rx_queue_get(sk); 3856 3857 if (tci >= 0 && tci < dev->num_rx_queues) 3858 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3859 tci); 3860 } 3861 3862 get_cpus_map: 3863 if (queue_index < 0) { 3864 dev_maps = rcu_dereference(sb_dev->xps_cpus_map); 3865 if (dev_maps) { 3866 unsigned int tci = skb->sender_cpu - 1; 3867 3868 queue_index = __get_xps_queue_idx(dev, skb, dev_maps, 3869 tci); 3870 } 3871 } 3872 rcu_read_unlock(); 3873 3874 return queue_index; 3875 #else 3876 return -1; 3877 #endif 3878 } 3879 3880 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, 3881 struct net_device *sb_dev) 3882 { 3883 return 0; 3884 } 3885 EXPORT_SYMBOL(dev_pick_tx_zero); 3886 3887 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, 3888 struct net_device *sb_dev) 3889 { 3890 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues; 3891 } 3892 EXPORT_SYMBOL(dev_pick_tx_cpu_id); 3893 3894 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, 3895 struct net_device *sb_dev) 3896 { 3897 struct sock *sk = skb->sk; 3898 int queue_index = sk_tx_queue_get(sk); 3899 3900 sb_dev = sb_dev ? 
: dev; 3901 3902 if (queue_index < 0 || skb->ooo_okay || 3903 queue_index >= dev->real_num_tx_queues) { 3904 int new_index = get_xps_queue(dev, sb_dev, skb); 3905 3906 if (new_index < 0) 3907 new_index = skb_tx_hash(dev, sb_dev, skb); 3908 3909 if (queue_index != new_index && sk && 3910 sk_fullsock(sk) && 3911 rcu_access_pointer(sk->sk_dst_cache)) 3912 sk_tx_queue_set(sk, new_index); 3913 3914 queue_index = new_index; 3915 } 3916 3917 return queue_index; 3918 } 3919 EXPORT_SYMBOL(netdev_pick_tx); 3920 3921 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, 3922 struct sk_buff *skb, 3923 struct net_device *sb_dev) 3924 { 3925 int queue_index = 0; 3926 3927 #ifdef CONFIG_XPS 3928 u32 sender_cpu = skb->sender_cpu - 1; 3929 3930 if (sender_cpu >= (u32)NR_CPUS) 3931 skb->sender_cpu = raw_smp_processor_id() + 1; 3932 #endif 3933 3934 if (dev->real_num_tx_queues != 1) { 3935 const struct net_device_ops *ops = dev->netdev_ops; 3936 3937 if (ops->ndo_select_queue) 3938 queue_index = ops->ndo_select_queue(dev, skb, sb_dev); 3939 else 3940 queue_index = netdev_pick_tx(dev, skb, sb_dev); 3941 3942 queue_index = netdev_cap_txqueue(dev, queue_index); 3943 } 3944 3945 skb_set_queue_mapping(skb, queue_index); 3946 return netdev_get_tx_queue(dev, queue_index); 3947 } 3948 3949 /** 3950 * __dev_queue_xmit - transmit a buffer 3951 * @skb: buffer to transmit 3952 * @sb_dev: subordinate device used for L2 forwarding offload 3953 * 3954 * Queue a buffer for transmission to a network device. The caller must 3955 * have set the device and priority and built the buffer before calling 3956 * this function. The function can be called from an interrupt. 3957 * 3958 * A negative errno code is returned on a failure. A success does not 3959 * guarantee the frame will be transmitted as it may be dropped due 3960 * to congestion or traffic shaping. 3961 * 3962 * ----------------------------------------------------------------------------------- 3963 * I notice this method can also return errors from the queue disciplines, 3964 * including NET_XMIT_DROP, which is a positive value. So, errors can also 3965 * be positive. 3966 * 3967 * Regardless of the return value, the skb is consumed, so it is currently 3968 * difficult to retry a send to this method. (You can bump the ref count 3969 * before sending to hold a reference for retry if you are careful.) 3970 * 3971 * When calling this method, interrupts MUST be enabled. This is because 3972 * the BH enable code must have IRQs enabled so that it will not deadlock. 3973 * --BLG 3974 */ 3975 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) 3976 { 3977 struct net_device *dev = skb->dev; 3978 struct netdev_queue *txq; 3979 struct Qdisc *q; 3980 int rc = -ENOMEM; 3981 bool again = false; 3982 3983 skb_reset_mac_header(skb); 3984 3985 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) 3986 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); 3987 3988 /* Disable soft irqs for various locks below. Also 3989 * stops preemption for RCU. 3990 */ 3991 rcu_read_lock_bh(); 3992 3993 skb_update_prio(skb); 3994 3995 qdisc_pkt_len_init(skb); 3996 #ifdef CONFIG_NET_CLS_ACT 3997 skb->tc_at_ingress = 0; 3998 # ifdef CONFIG_NET_EGRESS 3999 if (static_branch_unlikely(&egress_needed_key)) { 4000 skb = sch_handle_egress(skb, &rc, dev); 4001 if (!skb) 4002 goto out; 4003 } 4004 # endif 4005 #endif 4006 /* If device/qdisc don't need skb->dst, release it right now while 4007 * it's hot in this CPU cache. 

int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
EXPORT_SYMBOL(dev_direct_xmit);

/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog); 4138 4139 int netdev_tstamp_prequeue __read_mostly = 1; 4140 int netdev_budget __read_mostly = 300; 4141 unsigned int __read_mostly netdev_budget_usecs = 2000; 4142 int weight_p __read_mostly = 64; /* old backlog weight */ 4143 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ 4144 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ 4145 int dev_rx_weight __read_mostly = 64; 4146 int dev_tx_weight __read_mostly = 64; 4147 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ 4148 int gro_normal_batch __read_mostly = 8; 4149 4150 /* Called with irq disabled */ 4151 static inline void ____napi_schedule(struct softnet_data *sd, 4152 struct napi_struct *napi) 4153 { 4154 list_add_tail(&napi->poll_list, &sd->poll_list); 4155 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4156 } 4157 4158 #ifdef CONFIG_RPS 4159 4160 /* One global table that all flow-based protocols share. */ 4161 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 4162 EXPORT_SYMBOL(rps_sock_flow_table); 4163 u32 rps_cpu_mask __read_mostly; 4164 EXPORT_SYMBOL(rps_cpu_mask); 4165 4166 struct static_key_false rps_needed __read_mostly; 4167 EXPORT_SYMBOL(rps_needed); 4168 struct static_key_false rfs_needed __read_mostly; 4169 EXPORT_SYMBOL(rfs_needed); 4170 4171 static struct rps_dev_flow * 4172 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4173 struct rps_dev_flow *rflow, u16 next_cpu) 4174 { 4175 if (next_cpu < nr_cpu_ids) { 4176 #ifdef CONFIG_RFS_ACCEL 4177 struct netdev_rx_queue *rxqueue; 4178 struct rps_dev_flow_table *flow_table; 4179 struct rps_dev_flow *old_rflow; 4180 u32 flow_id; 4181 u16 rxq_index; 4182 int rc; 4183 4184 /* Should we steer this flow to a different hardware queue? */ 4185 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 4186 !(dev->features & NETIF_F_NTUPLE)) 4187 goto out; 4188 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 4189 if (rxq_index == skb_get_rx_queue(skb)) 4190 goto out; 4191 4192 rxqueue = dev->_rx + rxq_index; 4193 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4194 if (!flow_table) 4195 goto out; 4196 flow_id = skb_get_hash(skb) & flow_table->mask; 4197 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 4198 rxq_index, flow_id); 4199 if (rc < 0) 4200 goto out; 4201 old_rflow = rflow; 4202 rflow = &flow_table->flows[flow_id]; 4203 rflow->filter = rc; 4204 if (old_rflow->filter == rflow->filter) 4205 old_rflow->filter = RPS_NO_FILTER; 4206 out: 4207 #endif 4208 rflow->last_qtail = 4209 per_cpu(softnet_data, next_cpu).input_queue_head; 4210 } 4211 4212 rflow->cpu = next_cpu; 4213 return rflow; 4214 } 4215 4216 /* 4217 * get_rps_cpu is called from netif_receive_skb and returns the target 4218 * CPU from the RPS map of the receiving queue for a given skb. 4219 * rcu_read_lock must be held on entry. 
4220 */ 4221 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 4222 struct rps_dev_flow **rflowp) 4223 { 4224 const struct rps_sock_flow_table *sock_flow_table; 4225 struct netdev_rx_queue *rxqueue = dev->_rx; 4226 struct rps_dev_flow_table *flow_table; 4227 struct rps_map *map; 4228 int cpu = -1; 4229 u32 tcpu; 4230 u32 hash; 4231 4232 if (skb_rx_queue_recorded(skb)) { 4233 u16 index = skb_get_rx_queue(skb); 4234 4235 if (unlikely(index >= dev->real_num_rx_queues)) { 4236 WARN_ONCE(dev->real_num_rx_queues > 1, 4237 "%s received packet on queue %u, but number " 4238 "of RX queues is %u\n", 4239 dev->name, index, dev->real_num_rx_queues); 4240 goto done; 4241 } 4242 rxqueue += index; 4243 } 4244 4245 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ 4246 4247 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4248 map = rcu_dereference(rxqueue->rps_map); 4249 if (!flow_table && !map) 4250 goto done; 4251 4252 skb_reset_network_header(skb); 4253 hash = skb_get_hash(skb); 4254 if (!hash) 4255 goto done; 4256 4257 sock_flow_table = rcu_dereference(rps_sock_flow_table); 4258 if (flow_table && sock_flow_table) { 4259 struct rps_dev_flow *rflow; 4260 u32 next_cpu; 4261 u32 ident; 4262 4263 /* First check into global flow table if there is a match */ 4264 ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4265 if ((ident ^ hash) & ~rps_cpu_mask) 4266 goto try_rps; 4267 4268 next_cpu = ident & rps_cpu_mask; 4269 4270 /* OK, now we know there is a match, 4271 * we can look at the local (per receive queue) flow table 4272 */ 4273 rflow = &flow_table->flows[hash & flow_table->mask]; 4274 tcpu = rflow->cpu; 4275 4276 /* 4277 * If the desired CPU (where last recvmsg was done) is 4278 * different from current CPU (one in the rx-queue flow 4279 * table entry), switch if one of the following holds: 4280 * - Current CPU is unset (>= nr_cpu_ids). 4281 * - Current CPU is offline. 4282 * - The current CPU's queue tail has advanced beyond the 4283 * last packet that was enqueued using this table entry. 4284 * This guarantees that all previous packets for the flow 4285 * have been dequeued, thus preserving in order delivery. 4286 */ 4287 if (unlikely(tcpu != next_cpu) && 4288 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || 4289 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 4290 rflow->last_qtail)) >= 0)) { 4291 tcpu = next_cpu; 4292 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 4293 } 4294 4295 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { 4296 *rflowp = rflow; 4297 cpu = tcpu; 4298 goto done; 4299 } 4300 } 4301 4302 try_rps: 4303 4304 if (map) { 4305 tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 4306 if (cpu_online(tcpu)) { 4307 cpu = tcpu; 4308 goto done; 4309 } 4310 } 4311 4312 done: 4313 return cpu; 4314 } 4315 4316 #ifdef CONFIG_RFS_ACCEL 4317 4318 /** 4319 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 4320 * @dev: Device on which the filter was set 4321 * @rxq_index: RX queue index 4322 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 4323 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 4324 * 4325 * Drivers that implement ndo_rx_flow_steer() should periodically call 4326 * this function for each installed filter and remove the filters for 4327 * which it returns %true. 
4328 */ 4329 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 4330 u32 flow_id, u16 filter_id) 4331 { 4332 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 4333 struct rps_dev_flow_table *flow_table; 4334 struct rps_dev_flow *rflow; 4335 bool expire = true; 4336 unsigned int cpu; 4337 4338 rcu_read_lock(); 4339 flow_table = rcu_dereference(rxqueue->rps_flow_table); 4340 if (flow_table && flow_id <= flow_table->mask) { 4341 rflow = &flow_table->flows[flow_id]; 4342 cpu = READ_ONCE(rflow->cpu); 4343 if (rflow->filter == filter_id && cpu < nr_cpu_ids && 4344 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 4345 rflow->last_qtail) < 4346 (int)(10 * flow_table->mask))) 4347 expire = false; 4348 } 4349 rcu_read_unlock(); 4350 return expire; 4351 } 4352 EXPORT_SYMBOL(rps_may_expire_flow); 4353 4354 #endif /* CONFIG_RFS_ACCEL */ 4355 4356 /* Called from hardirq (IPI) context */ 4357 static void rps_trigger_softirq(void *data) 4358 { 4359 struct softnet_data *sd = data; 4360 4361 ____napi_schedule(sd, &sd->backlog); 4362 sd->received_rps++; 4363 } 4364 4365 #endif /* CONFIG_RPS */ 4366 4367 /* 4368 * Check if this softnet_data structure is another cpu one 4369 * If yes, queue it to our IPI list and return 1 4370 * If no, return 0 4371 */ 4372 static int rps_ipi_queued(struct softnet_data *sd) 4373 { 4374 #ifdef CONFIG_RPS 4375 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); 4376 4377 if (sd != mysd) { 4378 sd->rps_ipi_next = mysd->rps_ipi_list; 4379 mysd->rps_ipi_list = sd; 4380 4381 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4382 return 1; 4383 } 4384 #endif /* CONFIG_RPS */ 4385 return 0; 4386 } 4387 4388 #ifdef CONFIG_NET_FLOW_LIMIT 4389 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 4390 #endif 4391 4392 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 4393 { 4394 #ifdef CONFIG_NET_FLOW_LIMIT 4395 struct sd_flow_limit *fl; 4396 struct softnet_data *sd; 4397 unsigned int old_flow, new_flow; 4398 4399 if (qlen < (netdev_max_backlog >> 1)) 4400 return false; 4401 4402 sd = this_cpu_ptr(&softnet_data); 4403 4404 rcu_read_lock(); 4405 fl = rcu_dereference(sd->flow_limit); 4406 if (fl) { 4407 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); 4408 old_flow = fl->history[fl->history_head]; 4409 fl->history[fl->history_head] = new_flow; 4410 4411 fl->history_head++; 4412 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 4413 4414 if (likely(fl->buckets[old_flow])) 4415 fl->buckets[old_flow]--; 4416 4417 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 4418 fl->count++; 4419 rcu_read_unlock(); 4420 return true; 4421 } 4422 } 4423 rcu_read_unlock(); 4424 #endif 4425 return false; 4426 } 4427 4428 /* 4429 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 4430 * queue (may be a remote CPU queue). 
4431 */ 4432 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 4433 unsigned int *qtail) 4434 { 4435 struct softnet_data *sd; 4436 unsigned long flags; 4437 unsigned int qlen; 4438 4439 sd = &per_cpu(softnet_data, cpu); 4440 4441 local_irq_save(flags); 4442 4443 rps_lock(sd); 4444 if (!netif_running(skb->dev)) 4445 goto drop; 4446 qlen = skb_queue_len(&sd->input_pkt_queue); 4447 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 4448 if (qlen) { 4449 enqueue: 4450 __skb_queue_tail(&sd->input_pkt_queue, skb); 4451 input_queue_tail_incr_save(sd, qtail); 4452 rps_unlock(sd); 4453 local_irq_restore(flags); 4454 return NET_RX_SUCCESS; 4455 } 4456 4457 /* Schedule NAPI for backlog device 4458 * We can use non atomic operation since we own the queue lock 4459 */ 4460 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 4461 if (!rps_ipi_queued(sd)) 4462 ____napi_schedule(sd, &sd->backlog); 4463 } 4464 goto enqueue; 4465 } 4466 4467 drop: 4468 sd->dropped++; 4469 rps_unlock(sd); 4470 4471 local_irq_restore(flags); 4472 4473 atomic_long_inc(&skb->dev->rx_dropped); 4474 kfree_skb(skb); 4475 return NET_RX_DROP; 4476 } 4477 4478 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) 4479 { 4480 struct net_device *dev = skb->dev; 4481 struct netdev_rx_queue *rxqueue; 4482 4483 rxqueue = dev->_rx; 4484 4485 if (skb_rx_queue_recorded(skb)) { 4486 u16 index = skb_get_rx_queue(skb); 4487 4488 if (unlikely(index >= dev->real_num_rx_queues)) { 4489 WARN_ONCE(dev->real_num_rx_queues > 1, 4490 "%s received packet on queue %u, but number " 4491 "of RX queues is %u\n", 4492 dev->name, index, dev->real_num_rx_queues); 4493 4494 return rxqueue; /* Return first rxqueue */ 4495 } 4496 rxqueue += index; 4497 } 4498 return rxqueue; 4499 } 4500 4501 static u32 netif_receive_generic_xdp(struct sk_buff *skb, 4502 struct xdp_buff *xdp, 4503 struct bpf_prog *xdp_prog) 4504 { 4505 struct netdev_rx_queue *rxqueue; 4506 void *orig_data, *orig_data_end; 4507 u32 metalen, act = XDP_DROP; 4508 __be16 orig_eth_type; 4509 struct ethhdr *eth; 4510 bool orig_bcast; 4511 int hlen, off; 4512 u32 mac_len; 4513 4514 /* Reinjected packets coming from act_mirred or similar should 4515 * not get XDP generic processing. 4516 */ 4517 if (skb_is_tc_redirected(skb)) 4518 return XDP_PASS; 4519 4520 /* XDP packets must be linear and must have sufficient headroom 4521 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also 4522 * native XDP provides, thus we need to do it here as well. 4523 */ 4524 if (skb_cloned(skb) || skb_is_nonlinear(skb) || 4525 skb_headroom(skb) < XDP_PACKET_HEADROOM) { 4526 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); 4527 int troom = skb->tail + skb->data_len - skb->end; 4528 4529 /* In case we have to go down the path and also linearize, 4530 * then lets do the pskb_expand_head() work just once here. 4531 */ 4532 if (pskb_expand_head(skb, 4533 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, 4534 troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) 4535 goto do_drop; 4536 if (skb_linearize(skb)) 4537 goto do_drop; 4538 } 4539 4540 /* The XDP program wants to see the packet starting at the MAC 4541 * header. 
 */
	mac_len = skb->data - skb_mac_header(skb);
	hlen = skb_headlen(skb) + mac_len;
	xdp->data = skb->data - mac_len;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + hlen;
	xdp->data_hard_start = skb->data - skb_headroom(skb);
	orig_data_end = xdp->data_end;
	orig_data = xdp->data;
	eth = (struct ethhdr *)xdp->data;
	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
	orig_eth_type = eth->h_proto;

	rxqueue = netif_get_rxqueue(skb);
	xdp->rxq = &rxqueue->xdp_rxq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* check if bpf_xdp_adjust_head was used */
	off = xdp->data - orig_data;
	if (off) {
		if (off > 0)
			__skb_pull(skb, off);
		else if (off < 0)
			__skb_push(skb, -off);

		skb->mac_header += off;
		skb_reset_network_header(skb);
	}

	/* check if bpf_xdp_adjust_tail was used. It can only "shrink"
	 * the packet.
	 */
	off = orig_data_end - xdp->data_end;
	if (off != 0) {
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
		skb->len -= off;
	}

	/* check if XDP changed the ethernet header, such that the skb
	 * needs an update
	 */
	eth = (struct ethhdr *)xdp->data;
	if ((orig_eth_type != eth->h_proto) ||
	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
	}

	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp->data - xdp->data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}

/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior.
4617 */ 4618 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) 4619 { 4620 struct net_device *dev = skb->dev; 4621 struct netdev_queue *txq; 4622 bool free_skb = true; 4623 int cpu, rc; 4624 4625 txq = netdev_core_pick_tx(dev, skb, NULL); 4626 cpu = smp_processor_id(); 4627 HARD_TX_LOCK(dev, txq, cpu); 4628 if (!netif_xmit_stopped(txq)) { 4629 rc = netdev_start_xmit(skb, dev, txq, 0); 4630 if (dev_xmit_complete(rc)) 4631 free_skb = false; 4632 } 4633 HARD_TX_UNLOCK(dev, txq); 4634 if (free_skb) { 4635 trace_xdp_exception(dev, xdp_prog, XDP_TX); 4636 kfree_skb(skb); 4637 } 4638 } 4639 EXPORT_SYMBOL_GPL(generic_xdp_tx); 4640 4641 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); 4642 4643 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) 4644 { 4645 if (xdp_prog) { 4646 struct xdp_buff xdp; 4647 u32 act; 4648 int err; 4649 4650 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog); 4651 if (act != XDP_PASS) { 4652 switch (act) { 4653 case XDP_REDIRECT: 4654 err = xdp_do_generic_redirect(skb->dev, skb, 4655 &xdp, xdp_prog); 4656 if (err) 4657 goto out_redir; 4658 break; 4659 case XDP_TX: 4660 generic_xdp_tx(skb, xdp_prog); 4661 break; 4662 } 4663 return XDP_DROP; 4664 } 4665 } 4666 return XDP_PASS; 4667 out_redir: 4668 kfree_skb(skb); 4669 return XDP_DROP; 4670 } 4671 EXPORT_SYMBOL_GPL(do_xdp_generic); 4672 4673 static int netif_rx_internal(struct sk_buff *skb) 4674 { 4675 int ret; 4676 4677 net_timestamp_check(netdev_tstamp_prequeue, skb); 4678 4679 trace_netif_rx(skb); 4680 4681 #ifdef CONFIG_RPS 4682 if (static_branch_unlikely(&rps_needed)) { 4683 struct rps_dev_flow voidflow, *rflow = &voidflow; 4684 int cpu; 4685 4686 preempt_disable(); 4687 rcu_read_lock(); 4688 4689 cpu = get_rps_cpu(skb->dev, skb, &rflow); 4690 if (cpu < 0) 4691 cpu = smp_processor_id(); 4692 4693 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 4694 4695 rcu_read_unlock(); 4696 preempt_enable(); 4697 } else 4698 #endif 4699 { 4700 unsigned int qtail; 4701 4702 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 4703 put_cpu(); 4704 } 4705 return ret; 4706 } 4707 4708 /** 4709 * netif_rx - post buffer to the network code 4710 * @skb: buffer to post 4711 * 4712 * This function receives a packet from a device driver and queues it for 4713 * the upper (protocol) levels to process. It always succeeds. The buffer 4714 * may be dropped during processing for congestion control or by the 4715 * protocol layers. 
4716 * 4717 * return values: 4718 * NET_RX_SUCCESS (no congestion) 4719 * NET_RX_DROP (packet was dropped) 4720 * 4721 */ 4722 4723 int netif_rx(struct sk_buff *skb) 4724 { 4725 int ret; 4726 4727 trace_netif_rx_entry(skb); 4728 4729 ret = netif_rx_internal(skb); 4730 trace_netif_rx_exit(ret); 4731 4732 return ret; 4733 } 4734 EXPORT_SYMBOL(netif_rx); 4735 4736 int netif_rx_ni(struct sk_buff *skb) 4737 { 4738 int err; 4739 4740 trace_netif_rx_ni_entry(skb); 4741 4742 preempt_disable(); 4743 err = netif_rx_internal(skb); 4744 if (local_softirq_pending()) 4745 do_softirq(); 4746 preempt_enable(); 4747 trace_netif_rx_ni_exit(err); 4748 4749 return err; 4750 } 4751 EXPORT_SYMBOL(netif_rx_ni); 4752 4753 static __latent_entropy void net_tx_action(struct softirq_action *h) 4754 { 4755 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4756 4757 if (sd->completion_queue) { 4758 struct sk_buff *clist; 4759 4760 local_irq_disable(); 4761 clist = sd->completion_queue; 4762 sd->completion_queue = NULL; 4763 local_irq_enable(); 4764 4765 while (clist) { 4766 struct sk_buff *skb = clist; 4767 4768 clist = clist->next; 4769 4770 WARN_ON(refcount_read(&skb->users)); 4771 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 4772 trace_consume_skb(skb); 4773 else 4774 trace_kfree_skb(skb, net_tx_action); 4775 4776 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) 4777 __kfree_skb(skb); 4778 else 4779 __kfree_skb_defer(skb); 4780 } 4781 4782 __kfree_skb_flush(); 4783 } 4784 4785 if (sd->output_queue) { 4786 struct Qdisc *head; 4787 4788 local_irq_disable(); 4789 head = sd->output_queue; 4790 sd->output_queue = NULL; 4791 sd->output_queue_tailp = &sd->output_queue; 4792 local_irq_enable(); 4793 4794 while (head) { 4795 struct Qdisc *q = head; 4796 spinlock_t *root_lock = NULL; 4797 4798 head = head->next_sched; 4799 4800 if (!(q->flags & TCQ_F_NOLOCK)) { 4801 root_lock = qdisc_lock(q); 4802 spin_lock(root_lock); 4803 } 4804 /* We need to make sure head->next_sched is read 4805 * before clearing __QDISC_STATE_SCHED 4806 */ 4807 smp_mb__before_atomic(); 4808 clear_bit(__QDISC_STATE_SCHED, &q->state); 4809 qdisc_run(q); 4810 if (root_lock) 4811 spin_unlock(root_lock); 4812 } 4813 } 4814 4815 xfrm_dev_backlog(sd); 4816 } 4817 4818 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) 4819 /* This hook is defined here for ATM LANE */ 4820 int (*br_fdb_test_addr_hook)(struct net_device *dev, 4821 unsigned char *addr) __read_mostly; 4822 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 4823 #endif 4824 4825 static inline struct sk_buff * 4826 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, 4827 struct net_device *orig_dev) 4828 { 4829 #ifdef CONFIG_NET_CLS_ACT 4830 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress); 4831 struct tcf_result cl_res; 4832 4833 /* If there's at least one ingress present somewhere (so 4834 * we get here via enabled static key), remaining devices 4835 * that are not configured with an ingress qdisc will bail 4836 * out here. 
4837 */ 4838 if (!miniq) 4839 return skb; 4840 4841 if (*pt_prev) { 4842 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4843 *pt_prev = NULL; 4844 } 4845 4846 qdisc_skb_cb(skb)->pkt_len = skb->len; 4847 skb->tc_at_ingress = 1; 4848 mini_qdisc_bstats_cpu_update(miniq, skb); 4849 4850 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) { 4851 case TC_ACT_OK: 4852 case TC_ACT_RECLASSIFY: 4853 skb->tc_index = TC_H_MIN(cl_res.classid); 4854 break; 4855 case TC_ACT_SHOT: 4856 mini_qdisc_qstats_cpu_drop(miniq); 4857 kfree_skb(skb); 4858 return NULL; 4859 case TC_ACT_STOLEN: 4860 case TC_ACT_QUEUED: 4861 case TC_ACT_TRAP: 4862 consume_skb(skb); 4863 return NULL; 4864 case TC_ACT_REDIRECT: 4865 /* skb_mac_header check was done by cls/act_bpf, so 4866 * we can safely push the L2 header back before 4867 * redirecting to another netdev 4868 */ 4869 __skb_push(skb, skb->mac_len); 4870 skb_do_redirect(skb); 4871 return NULL; 4872 case TC_ACT_CONSUMED: 4873 return NULL; 4874 default: 4875 break; 4876 } 4877 #endif /* CONFIG_NET_CLS_ACT */ 4878 return skb; 4879 } 4880 4881 /** 4882 * netdev_is_rx_handler_busy - check if receive handler is registered 4883 * @dev: device to check 4884 * 4885 * Check if a receive handler is already registered for a given device. 4886 * Return true if there one. 4887 * 4888 * The caller must hold the rtnl_mutex. 4889 */ 4890 bool netdev_is_rx_handler_busy(struct net_device *dev) 4891 { 4892 ASSERT_RTNL(); 4893 return dev && rtnl_dereference(dev->rx_handler); 4894 } 4895 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); 4896 4897 /** 4898 * netdev_rx_handler_register - register receive handler 4899 * @dev: device to register a handler for 4900 * @rx_handler: receive handler to register 4901 * @rx_handler_data: data pointer that is used by rx handler 4902 * 4903 * Register a receive handler for a device. This handler will then be 4904 * called from __netif_receive_skb. A negative errno code is returned 4905 * on a failure. 4906 * 4907 * The caller must hold the rtnl_mutex. 4908 * 4909 * For a general description of rx_handler, see enum rx_handler_result. 4910 */ 4911 int netdev_rx_handler_register(struct net_device *dev, 4912 rx_handler_func_t *rx_handler, 4913 void *rx_handler_data) 4914 { 4915 if (netdev_is_rx_handler_busy(dev)) 4916 return -EBUSY; 4917 4918 if (dev->priv_flags & IFF_NO_RX_HANDLER) 4919 return -EINVAL; 4920 4921 /* Note: rx_handler_data must be set before rx_handler */ 4922 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 4923 rcu_assign_pointer(dev->rx_handler, rx_handler); 4924 4925 return 0; 4926 } 4927 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 4928 4929 /** 4930 * netdev_rx_handler_unregister - unregister receive handler 4931 * @dev: device to unregister a handler from 4932 * 4933 * Unregister a receive handler from a device. 4934 * 4935 * The caller must hold the rtnl_mutex. 4936 */ 4937 void netdev_rx_handler_unregister(struct net_device *dev) 4938 { 4939 4940 ASSERT_RTNL(); 4941 RCU_INIT_POINTER(dev->rx_handler, NULL); 4942 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 4943 * section has a guarantee to see a non NULL rx_handler_data 4944 * as well. 4945 */ 4946 synchronize_net(); 4947 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 4948 } 4949 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 4950 4951 /* 4952 * Limit the use of PFMEMALLOC reserves to those protocols that implement 4953 * the special handling of PFMEMALLOC skbs. 
4954 */ 4955 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 4956 { 4957 switch (skb->protocol) { 4958 case htons(ETH_P_ARP): 4959 case htons(ETH_P_IP): 4960 case htons(ETH_P_IPV6): 4961 case htons(ETH_P_8021Q): 4962 case htons(ETH_P_8021AD): 4963 return true; 4964 default: 4965 return false; 4966 } 4967 } 4968 4969 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, 4970 int *ret, struct net_device *orig_dev) 4971 { 4972 if (nf_hook_ingress_active(skb)) { 4973 int ingress_retval; 4974 4975 if (*pt_prev) { 4976 *ret = deliver_skb(skb, *pt_prev, orig_dev); 4977 *pt_prev = NULL; 4978 } 4979 4980 rcu_read_lock(); 4981 ingress_retval = nf_hook_ingress(skb); 4982 rcu_read_unlock(); 4983 return ingress_retval; 4984 } 4985 return 0; 4986 } 4987 4988 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, 4989 struct packet_type **ppt_prev) 4990 { 4991 struct packet_type *ptype, *pt_prev; 4992 rx_handler_func_t *rx_handler; 4993 struct net_device *orig_dev; 4994 bool deliver_exact = false; 4995 int ret = NET_RX_DROP; 4996 __be16 type; 4997 4998 net_timestamp_check(!netdev_tstamp_prequeue, skb); 4999 5000 trace_netif_receive_skb(skb); 5001 5002 orig_dev = skb->dev; 5003 5004 skb_reset_network_header(skb); 5005 if (!skb_transport_header_was_set(skb)) 5006 skb_reset_transport_header(skb); 5007 skb_reset_mac_len(skb); 5008 5009 pt_prev = NULL; 5010 5011 another_round: 5012 skb->skb_iif = skb->dev->ifindex; 5013 5014 __this_cpu_inc(softnet_data.processed); 5015 5016 if (static_branch_unlikely(&generic_xdp_needed_key)) { 5017 int ret2; 5018 5019 preempt_disable(); 5020 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5021 preempt_enable(); 5022 5023 if (ret2 != XDP_PASS) 5024 return NET_RX_DROP; 5025 skb_reset_mac_len(skb); 5026 } 5027 5028 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 5029 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 5030 skb = skb_vlan_untag(skb); 5031 if (unlikely(!skb)) 5032 goto out; 5033 } 5034 5035 if (skb_skip_tc_classify(skb)) 5036 goto skip_classify; 5037 5038 if (pfmemalloc) 5039 goto skip_taps; 5040 5041 list_for_each_entry_rcu(ptype, &ptype_all, list) { 5042 if (pt_prev) 5043 ret = deliver_skb(skb, pt_prev, orig_dev); 5044 pt_prev = ptype; 5045 } 5046 5047 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) { 5048 if (pt_prev) 5049 ret = deliver_skb(skb, pt_prev, orig_dev); 5050 pt_prev = ptype; 5051 } 5052 5053 skip_taps: 5054 #ifdef CONFIG_NET_INGRESS 5055 if (static_branch_unlikely(&ingress_needed_key)) { 5056 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); 5057 if (!skb) 5058 goto out; 5059 5060 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 5061 goto out; 5062 } 5063 #endif 5064 skb_reset_tc(skb); 5065 skip_classify: 5066 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 5067 goto drop; 5068 5069 if (skb_vlan_tag_present(skb)) { 5070 if (pt_prev) { 5071 ret = deliver_skb(skb, pt_prev, orig_dev); 5072 pt_prev = NULL; 5073 } 5074 if (vlan_do_receive(&skb)) 5075 goto another_round; 5076 else if (unlikely(!skb)) 5077 goto out; 5078 } 5079 5080 rx_handler = rcu_dereference(skb->dev->rx_handler); 5081 if (rx_handler) { 5082 if (pt_prev) { 5083 ret = deliver_skb(skb, pt_prev, orig_dev); 5084 pt_prev = NULL; 5085 } 5086 switch (rx_handler(&skb)) { 5087 case RX_HANDLER_CONSUMED: 5088 ret = NET_RX_SUCCESS; 5089 goto out; 5090 case RX_HANDLER_ANOTHER: 5091 goto another_round; 5092 case RX_HANDLER_EXACT: 5093 deliver_exact = true; 5094 case RX_HANDLER_PASS: 5095 break; 5096 default: 5097 
BUG(); 5098 } 5099 } 5100 5101 if (unlikely(skb_vlan_tag_present(skb))) { 5102 check_vlan_id: 5103 if (skb_vlan_tag_get_id(skb)) { 5104 /* Vlan id is non 0 and vlan_do_receive() above couldn't 5105 * find vlan device. 5106 */ 5107 skb->pkt_type = PACKET_OTHERHOST; 5108 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 5109 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 5110 /* Outer header is 802.1P with vlan 0, inner header is 5111 * 802.1Q or 802.1AD and vlan_do_receive() above could 5112 * not find vlan dev for vlan id 0. 5113 */ 5114 __vlan_hwaccel_clear_tag(skb); 5115 skb = skb_vlan_untag(skb); 5116 if (unlikely(!skb)) 5117 goto out; 5118 if (vlan_do_receive(&skb)) 5119 /* After stripping off 802.1P header with vlan 0 5120 * vlan dev is found for inner header. 5121 */ 5122 goto another_round; 5123 else if (unlikely(!skb)) 5124 goto out; 5125 else 5126 /* We have stripped outer 802.1P vlan 0 header. 5127 * But could not find vlan dev. 5128 * check again for vlan id to set OTHERHOST. 5129 */ 5130 goto check_vlan_id; 5131 } 5132 /* Note: we might in the future use prio bits 5133 * and set skb->priority like in vlan_do_receive() 5134 * For the time being, just ignore Priority Code Point 5135 */ 5136 __vlan_hwaccel_clear_tag(skb); 5137 } 5138 5139 type = skb->protocol; 5140 5141 /* deliver only exact match when indicated */ 5142 if (likely(!deliver_exact)) { 5143 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5144 &ptype_base[ntohs(type) & 5145 PTYPE_HASH_MASK]); 5146 } 5147 5148 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5149 &orig_dev->ptype_specific); 5150 5151 if (unlikely(skb->dev != orig_dev)) { 5152 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, 5153 &skb->dev->ptype_specific); 5154 } 5155 5156 if (pt_prev) { 5157 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 5158 goto drop; 5159 *ppt_prev = pt_prev; 5160 } else { 5161 drop: 5162 if (!deliver_exact) 5163 atomic_long_inc(&skb->dev->rx_dropped); 5164 else 5165 atomic_long_inc(&skb->dev->rx_nohandler); 5166 kfree_skb(skb); 5167 /* Jamal, now you will not able to escape explaining 5168 * me how you were going to use this. :-) 5169 */ 5170 ret = NET_RX_DROP; 5171 } 5172 5173 out: 5174 return ret; 5175 } 5176 5177 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc) 5178 { 5179 struct net_device *orig_dev = skb->dev; 5180 struct packet_type *pt_prev = NULL; 5181 int ret; 5182 5183 ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5184 if (pt_prev) 5185 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5186 skb->dev, pt_prev, orig_dev); 5187 return ret; 5188 } 5189 5190 /** 5191 * netif_receive_skb_core - special purpose version of netif_receive_skb 5192 * @skb: buffer to process 5193 * 5194 * More direct receive version of netif_receive_skb(). It should 5195 * only be used by callers that have a need to skip RPS and Generic XDP. 5196 * Caller must also take care of handling if (page_is_)pfmemalloc. 5197 * 5198 * This function may only be called from softirq context and interrupts 5199 * should be enabled. 
5200 * 5201 * Return values (usually ignored): 5202 * NET_RX_SUCCESS: no congestion 5203 * NET_RX_DROP: packet was dropped 5204 */ 5205 int netif_receive_skb_core(struct sk_buff *skb) 5206 { 5207 int ret; 5208 5209 rcu_read_lock(); 5210 ret = __netif_receive_skb_one_core(skb, false); 5211 rcu_read_unlock(); 5212 5213 return ret; 5214 } 5215 EXPORT_SYMBOL(netif_receive_skb_core); 5216 5217 static inline void __netif_receive_skb_list_ptype(struct list_head *head, 5218 struct packet_type *pt_prev, 5219 struct net_device *orig_dev) 5220 { 5221 struct sk_buff *skb, *next; 5222 5223 if (!pt_prev) 5224 return; 5225 if (list_empty(head)) 5226 return; 5227 if (pt_prev->list_func != NULL) 5228 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv, 5229 ip_list_rcv, head, pt_prev, orig_dev); 5230 else 5231 list_for_each_entry_safe(skb, next, head, list) { 5232 skb_list_del_init(skb); 5233 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5234 } 5235 } 5236 5237 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5238 { 5239 /* Fast-path assumptions: 5240 * - There is no RX handler. 5241 * - Only one packet_type matches. 5242 * If either of these fails, we will end up doing some per-packet 5243 * processing in-line, then handling the 'last ptype' for the whole 5244 * sublist. This can't cause out-of-order delivery to any single ptype, 5245 * because the 'last ptype' must be constant across the sublist, and all 5246 * other ptypes are handled per-packet. 5247 */ 5248 /* Current (common) ptype of sublist */ 5249 struct packet_type *pt_curr = NULL; 5250 /* Current (common) orig_dev of sublist */ 5251 struct net_device *od_curr = NULL; 5252 struct list_head sublist; 5253 struct sk_buff *skb, *next; 5254 5255 INIT_LIST_HEAD(&sublist); 5256 list_for_each_entry_safe(skb, next, head, list) { 5257 struct net_device *orig_dev = skb->dev; 5258 struct packet_type *pt_prev = NULL; 5259 5260 skb_list_del_init(skb); 5261 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5262 if (!pt_prev) 5263 continue; 5264 if (pt_curr != pt_prev || od_curr != orig_dev) { 5265 /* dispatch old sublist */ 5266 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5267 /* start new sublist */ 5268 INIT_LIST_HEAD(&sublist); 5269 pt_curr = pt_prev; 5270 od_curr = orig_dev; 5271 } 5272 list_add_tail(&skb->list, &sublist); 5273 } 5274 5275 /* dispatch final sublist */ 5276 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr); 5277 } 5278 5279 static int __netif_receive_skb(struct sk_buff *skb) 5280 { 5281 int ret; 5282 5283 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 5284 unsigned int noreclaim_flag; 5285 5286 /* 5287 * PFMEMALLOC skbs are special, they should 5288 * - be delivered to SOCK_MEMALLOC sockets only 5289 * - stay away from userspace 5290 * - have bounded memory usage 5291 * 5292 * Use PF_MEMALLOC as this saves us from propagating the allocation 5293 * context down to all allocation sites. 5294 */ 5295 noreclaim_flag = memalloc_noreclaim_save(); 5296 ret = __netif_receive_skb_one_core(skb, true); 5297 memalloc_noreclaim_restore(noreclaim_flag); 5298 } else 5299 ret = __netif_receive_skb_one_core(skb, false); 5300 5301 return ret; 5302 } 5303 5304 static void __netif_receive_skb_list(struct list_head *head) 5305 { 5306 unsigned long noreclaim_flag = 0; 5307 struct sk_buff *skb, *next; 5308 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? 
*/ 5309 5310 list_for_each_entry_safe(skb, next, head, list) { 5311 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) { 5312 struct list_head sublist; 5313 5314 /* Handle the previous sublist */ 5315 list_cut_before(&sublist, head, &skb->list); 5316 if (!list_empty(&sublist)) 5317 __netif_receive_skb_list_core(&sublist, pfmemalloc); 5318 pfmemalloc = !pfmemalloc; 5319 /* See comments in __netif_receive_skb */ 5320 if (pfmemalloc) 5321 noreclaim_flag = memalloc_noreclaim_save(); 5322 else 5323 memalloc_noreclaim_restore(noreclaim_flag); 5324 } 5325 } 5326 /* Handle the remaining sublist */ 5327 if (!list_empty(head)) 5328 __netif_receive_skb_list_core(head, pfmemalloc); 5329 /* Restore pflags */ 5330 if (pfmemalloc) 5331 memalloc_noreclaim_restore(noreclaim_flag); 5332 } 5333 5334 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) 5335 { 5336 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); 5337 struct bpf_prog *new = xdp->prog; 5338 int ret = 0; 5339 5340 switch (xdp->command) { 5341 case XDP_SETUP_PROG: 5342 rcu_assign_pointer(dev->xdp_prog, new); 5343 if (old) 5344 bpf_prog_put(old); 5345 5346 if (old && !new) { 5347 static_branch_dec(&generic_xdp_needed_key); 5348 } else if (new && !old) { 5349 static_branch_inc(&generic_xdp_needed_key); 5350 dev_disable_lro(dev); 5351 dev_disable_gro_hw(dev); 5352 } 5353 break; 5354 5355 case XDP_QUERY_PROG: 5356 xdp->prog_id = old ? old->aux->id : 0; 5357 break; 5358 5359 default: 5360 ret = -EINVAL; 5361 break; 5362 } 5363 5364 return ret; 5365 } 5366 5367 static int netif_receive_skb_internal(struct sk_buff *skb) 5368 { 5369 int ret; 5370 5371 net_timestamp_check(netdev_tstamp_prequeue, skb); 5372 5373 if (skb_defer_rx_timestamp(skb)) 5374 return NET_RX_SUCCESS; 5375 5376 rcu_read_lock(); 5377 #ifdef CONFIG_RPS 5378 if (static_branch_unlikely(&rps_needed)) { 5379 struct rps_dev_flow voidflow, *rflow = &voidflow; 5380 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5381 5382 if (cpu >= 0) { 5383 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5384 rcu_read_unlock(); 5385 return ret; 5386 } 5387 } 5388 #endif 5389 ret = __netif_receive_skb(skb); 5390 rcu_read_unlock(); 5391 return ret; 5392 } 5393 5394 static void netif_receive_skb_list_internal(struct list_head *head) 5395 { 5396 struct sk_buff *skb, *next; 5397 struct list_head sublist; 5398 5399 INIT_LIST_HEAD(&sublist); 5400 list_for_each_entry_safe(skb, next, head, list) { 5401 net_timestamp_check(netdev_tstamp_prequeue, skb); 5402 skb_list_del_init(skb); 5403 if (!skb_defer_rx_timestamp(skb)) 5404 list_add_tail(&skb->list, &sublist); 5405 } 5406 list_splice_init(&sublist, head); 5407 5408 rcu_read_lock(); 5409 #ifdef CONFIG_RPS 5410 if (static_branch_unlikely(&rps_needed)) { 5411 list_for_each_entry_safe(skb, next, head, list) { 5412 struct rps_dev_flow voidflow, *rflow = &voidflow; 5413 int cpu = get_rps_cpu(skb->dev, skb, &rflow); 5414 5415 if (cpu >= 0) { 5416 /* Will be handled, remove from list */ 5417 skb_list_del_init(skb); 5418 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 5419 } 5420 } 5421 } 5422 #endif 5423 __netif_receive_skb_list(head); 5424 rcu_read_unlock(); 5425 } 5426 5427 /** 5428 * netif_receive_skb - process receive buffer from network 5429 * @skb: buffer to process 5430 * 5431 * netif_receive_skb() is the main receive data processing function. 5432 * It always succeeds. The buffer may be dropped during processing 5433 * for congestion control or by the protocol layers. 
5434 * 5435 * This function may only be called from softirq context and interrupts 5436 * should be enabled. 5437 * 5438 * Return values (usually ignored): 5439 * NET_RX_SUCCESS: no congestion 5440 * NET_RX_DROP: packet was dropped 5441 */ 5442 int netif_receive_skb(struct sk_buff *skb) 5443 { 5444 int ret; 5445 5446 trace_netif_receive_skb_entry(skb); 5447 5448 ret = netif_receive_skb_internal(skb); 5449 trace_netif_receive_skb_exit(ret); 5450 5451 return ret; 5452 } 5453 EXPORT_SYMBOL(netif_receive_skb); 5454 5455 /** 5456 * netif_receive_skb_list - process many receive buffers from network 5457 * @head: list of skbs to process. 5458 * 5459 * Since return value of netif_receive_skb() is normally ignored, and 5460 * wouldn't be meaningful for a list, this function returns void. 5461 * 5462 * This function may only be called from softirq context and interrupts 5463 * should be enabled. 5464 */ 5465 void netif_receive_skb_list(struct list_head *head) 5466 { 5467 struct sk_buff *skb; 5468 5469 if (list_empty(head)) 5470 return; 5471 if (trace_netif_receive_skb_list_entry_enabled()) { 5472 list_for_each_entry(skb, head, list) 5473 trace_netif_receive_skb_list_entry(skb); 5474 } 5475 netif_receive_skb_list_internal(head); 5476 trace_netif_receive_skb_list_exit(0); 5477 } 5478 EXPORT_SYMBOL(netif_receive_skb_list); 5479 5480 DEFINE_PER_CPU(struct work_struct, flush_works); 5481 5482 /* Network device is going away, flush any packets still pending */ 5483 static void flush_backlog(struct work_struct *work) 5484 { 5485 struct sk_buff *skb, *tmp; 5486 struct softnet_data *sd; 5487 5488 local_bh_disable(); 5489 sd = this_cpu_ptr(&softnet_data); 5490 5491 local_irq_disable(); 5492 rps_lock(sd); 5493 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 5494 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5495 __skb_unlink(skb, &sd->input_pkt_queue); 5496 kfree_skb(skb); 5497 input_queue_head_incr(sd); 5498 } 5499 } 5500 rps_unlock(sd); 5501 local_irq_enable(); 5502 5503 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 5504 if (skb->dev->reg_state == NETREG_UNREGISTERING) { 5505 __skb_unlink(skb, &sd->process_queue); 5506 kfree_skb(skb); 5507 input_queue_head_incr(sd); 5508 } 5509 } 5510 local_bh_enable(); 5511 } 5512 5513 static void flush_all_backlogs(void) 5514 { 5515 unsigned int cpu; 5516 5517 get_online_cpus(); 5518 5519 for_each_online_cpu(cpu) 5520 queue_work_on(cpu, system_highpri_wq, 5521 per_cpu_ptr(&flush_works, cpu)); 5522 5523 for_each_online_cpu(cpu) 5524 flush_work(per_cpu_ptr(&flush_works, cpu)); 5525 5526 put_online_cpus(); 5527 } 5528 5529 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ 5530 static void gro_normal_list(struct napi_struct *napi) 5531 { 5532 if (!napi->rx_count) 5533 return; 5534 netif_receive_skb_list_internal(&napi->rx_list); 5535 INIT_LIST_HEAD(&napi->rx_list); 5536 napi->rx_count = 0; 5537 } 5538 5539 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, 5540 * pass the whole batch up to the stack. 
5541 */ 5542 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb) 5543 { 5544 list_add_tail(&skb->list, &napi->rx_list); 5545 if (++napi->rx_count >= gro_normal_batch) 5546 gro_normal_list(napi); 5547 } 5548 5549 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); 5550 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); 5551 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) 5552 { 5553 struct packet_offload *ptype; 5554 __be16 type = skb->protocol; 5555 struct list_head *head = &offload_base; 5556 int err = -ENOENT; 5557 5558 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 5559 5560 if (NAPI_GRO_CB(skb)->count == 1) { 5561 skb_shinfo(skb)->gso_size = 0; 5562 goto out; 5563 } 5564 5565 rcu_read_lock(); 5566 list_for_each_entry_rcu(ptype, head, list) { 5567 if (ptype->type != type || !ptype->callbacks.gro_complete) 5568 continue; 5569 5570 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 5571 ipv6_gro_complete, inet_gro_complete, 5572 skb, 0); 5573 break; 5574 } 5575 rcu_read_unlock(); 5576 5577 if (err) { 5578 WARN_ON(&ptype->list == head); 5579 kfree_skb(skb); 5580 return NET_RX_SUCCESS; 5581 } 5582 5583 out: 5584 gro_normal_one(napi, skb); 5585 return NET_RX_SUCCESS; 5586 } 5587 5588 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, 5589 bool flush_old) 5590 { 5591 struct list_head *head = &napi->gro_hash[index].list; 5592 struct sk_buff *skb, *p; 5593 5594 list_for_each_entry_safe_reverse(skb, p, head, list) { 5595 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 5596 return; 5597 skb_list_del_init(skb); 5598 napi_gro_complete(napi, skb); 5599 napi->gro_hash[index].count--; 5600 } 5601 5602 if (!napi->gro_hash[index].count) 5603 __clear_bit(index, &napi->gro_bitmask); 5604 } 5605 5606 /* napi->gro_hash[].list contains packets ordered by age. 5607 * youngest packets at the head of it. 5608 * Complete skbs in reverse order to reduce latencies. 
5609 */ 5610 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 5611 { 5612 unsigned long bitmask = napi->gro_bitmask; 5613 unsigned int i, base = ~0U; 5614 5615 while ((i = ffs(bitmask)) != 0) { 5616 bitmask >>= i; 5617 base += i; 5618 __napi_gro_flush_chain(napi, base, flush_old); 5619 } 5620 } 5621 EXPORT_SYMBOL(napi_gro_flush); 5622 5623 static struct list_head *gro_list_prepare(struct napi_struct *napi, 5624 struct sk_buff *skb) 5625 { 5626 unsigned int maclen = skb->dev->hard_header_len; 5627 u32 hash = skb_get_hash_raw(skb); 5628 struct list_head *head; 5629 struct sk_buff *p; 5630 5631 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; 5632 list_for_each_entry(p, head, list) { 5633 unsigned long diffs; 5634 5635 NAPI_GRO_CB(p)->flush = 0; 5636 5637 if (hash != skb_get_hash_raw(p)) { 5638 NAPI_GRO_CB(p)->same_flow = 0; 5639 continue; 5640 } 5641 5642 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 5643 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); 5644 if (skb_vlan_tag_present(p)) 5645 diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb); 5646 diffs |= skb_metadata_dst_cmp(p, skb); 5647 diffs |= skb_metadata_differs(p, skb); 5648 if (maclen == ETH_HLEN) 5649 diffs |= compare_ether_header(skb_mac_header(p), 5650 skb_mac_header(skb)); 5651 else if (!diffs) 5652 diffs = memcmp(skb_mac_header(p), 5653 skb_mac_header(skb), 5654 maclen); 5655 NAPI_GRO_CB(p)->same_flow = !diffs; 5656 } 5657 5658 return head; 5659 } 5660 5661 static void skb_gro_reset_offset(struct sk_buff *skb) 5662 { 5663 const struct skb_shared_info *pinfo = skb_shinfo(skb); 5664 const skb_frag_t *frag0 = &pinfo->frags[0]; 5665 5666 NAPI_GRO_CB(skb)->data_offset = 0; 5667 NAPI_GRO_CB(skb)->frag0 = NULL; 5668 NAPI_GRO_CB(skb)->frag0_len = 0; 5669 5670 if (!skb_headlen(skb) && pinfo->nr_frags && 5671 !PageHighMem(skb_frag_page(frag0))) { 5672 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 5673 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, 5674 skb_frag_size(frag0), 5675 skb->end - skb->tail); 5676 } 5677 } 5678 5679 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 5680 { 5681 struct skb_shared_info *pinfo = skb_shinfo(skb); 5682 5683 BUG_ON(skb->end - skb->tail < grow); 5684 5685 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 5686 5687 skb->data_len -= grow; 5688 skb->tail += grow; 5689 5690 skb_frag_off_add(&pinfo->frags[0], grow); 5691 skb_frag_size_sub(&pinfo->frags[0], grow); 5692 5693 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 5694 skb_frag_unref(skb, 0); 5695 memmove(pinfo->frags, pinfo->frags + 1, 5696 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 5697 } 5698 } 5699 5700 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) 5701 { 5702 struct sk_buff *oldest; 5703 5704 oldest = list_last_entry(head, struct sk_buff, list); 5705 5706 /* We are called with head length >= MAX_GRO_SKBS, so this is 5707 * impossible. 5708 */ 5709 if (WARN_ON_ONCE(!oldest)) 5710 return; 5711 5712 /* Do not adjust napi->gro_hash[].count, caller is adding a new 5713 * SKB to the chain. 
5714 */ 5715 skb_list_del_init(oldest); 5716 napi_gro_complete(napi, oldest); 5717 } 5718 5719 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, 5720 struct sk_buff *)); 5721 INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *, 5722 struct sk_buff *)); 5723 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5724 { 5725 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 5726 struct list_head *head = &offload_base; 5727 struct packet_offload *ptype; 5728 __be16 type = skb->protocol; 5729 struct list_head *gro_head; 5730 struct sk_buff *pp = NULL; 5731 enum gro_result ret; 5732 int same_flow; 5733 int grow; 5734 5735 if (netif_elide_gro(skb->dev)) 5736 goto normal; 5737 5738 gro_head = gro_list_prepare(napi, skb); 5739 5740 rcu_read_lock(); 5741 list_for_each_entry_rcu(ptype, head, list) { 5742 if (ptype->type != type || !ptype->callbacks.gro_receive) 5743 continue; 5744 5745 skb_set_network_header(skb, skb_gro_offset(skb)); 5746 skb_reset_mac_len(skb); 5747 NAPI_GRO_CB(skb)->same_flow = 0; 5748 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb); 5749 NAPI_GRO_CB(skb)->free = 0; 5750 NAPI_GRO_CB(skb)->encap_mark = 0; 5751 NAPI_GRO_CB(skb)->recursion_counter = 0; 5752 NAPI_GRO_CB(skb)->is_fou = 0; 5753 NAPI_GRO_CB(skb)->is_atomic = 1; 5754 NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 5755 5756 /* Setup for GRO checksum validation */ 5757 switch (skb->ip_summed) { 5758 case CHECKSUM_COMPLETE: 5759 NAPI_GRO_CB(skb)->csum = skb->csum; 5760 NAPI_GRO_CB(skb)->csum_valid = 1; 5761 NAPI_GRO_CB(skb)->csum_cnt = 0; 5762 break; 5763 case CHECKSUM_UNNECESSARY: 5764 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; 5765 NAPI_GRO_CB(skb)->csum_valid = 0; 5766 break; 5767 default: 5768 NAPI_GRO_CB(skb)->csum_cnt = 0; 5769 NAPI_GRO_CB(skb)->csum_valid = 0; 5770 } 5771 5772 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive, 5773 ipv6_gro_receive, inet_gro_receive, 5774 gro_head, skb); 5775 break; 5776 } 5777 rcu_read_unlock(); 5778 5779 if (&ptype->list == head) 5780 goto normal; 5781 5782 if (PTR_ERR(pp) == -EINPROGRESS) { 5783 ret = GRO_CONSUMED; 5784 goto ok; 5785 } 5786 5787 same_flow = NAPI_GRO_CB(skb)->same_flow; 5788 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 5789 5790 if (pp) { 5791 skb_list_del_init(pp); 5792 napi_gro_complete(napi, pp); 5793 napi->gro_hash[hash].count--; 5794 } 5795 5796 if (same_flow) 5797 goto ok; 5798 5799 if (NAPI_GRO_CB(skb)->flush) 5800 goto normal; 5801 5802 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { 5803 gro_flush_oldest(napi, gro_head); 5804 } else { 5805 napi->gro_hash[hash].count++; 5806 } 5807 NAPI_GRO_CB(skb)->count = 1; 5808 NAPI_GRO_CB(skb)->age = jiffies; 5809 NAPI_GRO_CB(skb)->last = skb; 5810 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 5811 list_add(&skb->list, gro_head); 5812 ret = GRO_HELD; 5813 5814 pull: 5815 grow = skb_gro_offset(skb) - skb_headlen(skb); 5816 if (grow > 0) 5817 gro_pull_from_frag0(skb, grow); 5818 ok: 5819 if (napi->gro_hash[hash].count) { 5820 if (!test_bit(hash, &napi->gro_bitmask)) 5821 __set_bit(hash, &napi->gro_bitmask); 5822 } else if (test_bit(hash, &napi->gro_bitmask)) { 5823 __clear_bit(hash, &napi->gro_bitmask); 5824 } 5825 5826 return ret; 5827 5828 normal: 5829 ret = GRO_NORMAL; 5830 goto pull; 5831 } 5832 5833 struct packet_offload *gro_find_receive_by_type(__be16 type) 5834 { 5835 struct list_head *offload_head = &offload_base; 5836 struct packet_offload *ptype; 5837 5838 list_for_each_entry_rcu(ptype, offload_head, list) { 5839 if (ptype->type != type || !ptype->callbacks.gro_receive) 5840 continue; 5841 return ptype; 5842 } 5843 return NULL; 5844 } 5845 EXPORT_SYMBOL(gro_find_receive_by_type); 5846 5847 struct packet_offload *gro_find_complete_by_type(__be16 type) 5848 { 5849 struct list_head *offload_head = &offload_base; 5850 struct packet_offload *ptype; 5851 5852 list_for_each_entry_rcu(ptype, offload_head, list) { 5853 if (ptype->type != type || !ptype->callbacks.gro_complete) 5854 continue; 5855 return ptype; 5856 } 5857 return NULL; 5858 } 5859 EXPORT_SYMBOL(gro_find_complete_by_type); 5860 5861 static void napi_skb_free_stolen_head(struct sk_buff *skb) 5862 { 5863 skb_dst_drop(skb); 5864 skb_ext_put(skb); 5865 kmem_cache_free(skbuff_head_cache, skb); 5866 } 5867 5868 static gro_result_t napi_skb_finish(struct napi_struct *napi, 5869 struct sk_buff *skb, 5870 gro_result_t ret) 5871 { 5872 switch (ret) { 5873 case GRO_NORMAL: 5874 gro_normal_one(napi, skb); 5875 break; 5876 5877 case GRO_DROP: 5878 kfree_skb(skb); 5879 break; 5880 5881 case GRO_MERGED_FREE: 5882 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5883 napi_skb_free_stolen_head(skb); 5884 else 5885 __kfree_skb(skb); 5886 break; 5887 5888 case GRO_HELD: 5889 case GRO_MERGED: 5890 case GRO_CONSUMED: 5891 break; 5892 } 5893 5894 return ret; 5895 } 5896 5897 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 5898 { 5899 gro_result_t ret; 5900 5901 skb_mark_napi_id(skb, napi); 5902 trace_napi_gro_receive_entry(skb); 5903 5904 skb_gro_reset_offset(skb); 5905 5906 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); 5907 trace_napi_gro_receive_exit(ret); 5908 5909 return ret; 5910 } 5911 EXPORT_SYMBOL(napi_gro_receive); 5912 5913 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 5914 { 5915 if (unlikely(skb->pfmemalloc)) { 5916 consume_skb(skb); 5917 return; 5918 } 5919 __skb_pull(skb, skb_headlen(skb)); 5920 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 5921 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 5922 __vlan_hwaccel_clear_tag(skb); 5923 skb->dev = napi->dev; 5924 skb->skb_iif = 0; 5925 5926 /* eth_type_trans() assumes pkt_type is PACKET_HOST */ 
5927 skb->pkt_type = PACKET_HOST; 5928 5929 skb->encapsulation = 0; 5930 skb_shinfo(skb)->gso_type = 0; 5931 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5932 skb_ext_reset(skb); 5933 5934 napi->skb = skb; 5935 } 5936 5937 struct sk_buff *napi_get_frags(struct napi_struct *napi) 5938 { 5939 struct sk_buff *skb = napi->skb; 5940 5941 if (!skb) { 5942 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); 5943 if (skb) { 5944 napi->skb = skb; 5945 skb_mark_napi_id(skb, napi); 5946 } 5947 } 5948 return skb; 5949 } 5950 EXPORT_SYMBOL(napi_get_frags); 5951 5952 static gro_result_t napi_frags_finish(struct napi_struct *napi, 5953 struct sk_buff *skb, 5954 gro_result_t ret) 5955 { 5956 switch (ret) { 5957 case GRO_NORMAL: 5958 case GRO_HELD: 5959 __skb_push(skb, ETH_HLEN); 5960 skb->protocol = eth_type_trans(skb, skb->dev); 5961 if (ret == GRO_NORMAL) 5962 gro_normal_one(napi, skb); 5963 break; 5964 5965 case GRO_DROP: 5966 napi_reuse_skb(napi, skb); 5967 break; 5968 5969 case GRO_MERGED_FREE: 5970 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 5971 napi_skb_free_stolen_head(skb); 5972 else 5973 napi_reuse_skb(napi, skb); 5974 break; 5975 5976 case GRO_MERGED: 5977 case GRO_CONSUMED: 5978 break; 5979 } 5980 5981 return ret; 5982 } 5983 5984 /* Upper GRO stack assumes network header starts at gro_offset=0 5985 * Drivers could call both napi_gro_frags() and napi_gro_receive() 5986 * We copy ethernet header into skb->data to have a common layout. 5987 */ 5988 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 5989 { 5990 struct sk_buff *skb = napi->skb; 5991 const struct ethhdr *eth; 5992 unsigned int hlen = sizeof(*eth); 5993 5994 napi->skb = NULL; 5995 5996 skb_reset_mac_header(skb); 5997 skb_gro_reset_offset(skb); 5998 5999 if (unlikely(skb_gro_header_hard(skb, hlen))) { 6000 eth = skb_gro_header_slow(skb, hlen, 0); 6001 if (unlikely(!eth)) { 6002 net_warn_ratelimited("%s: dropping impossible skb from %s\n", 6003 __func__, napi->dev->name); 6004 napi_reuse_skb(napi, skb); 6005 return NULL; 6006 } 6007 } else { 6008 eth = (const struct ethhdr *)skb->data; 6009 gro_pull_from_frag0(skb, hlen); 6010 NAPI_GRO_CB(skb)->frag0 += hlen; 6011 NAPI_GRO_CB(skb)->frag0_len -= hlen; 6012 } 6013 __skb_pull(skb, hlen); 6014 6015 /* 6016 * This works because the only protocols we care about don't require 6017 * special handling. 6018 * We'll fix it up properly in napi_frags_finish() 6019 */ 6020 skb->protocol = eth->h_proto; 6021 6022 return skb; 6023 } 6024 6025 gro_result_t napi_gro_frags(struct napi_struct *napi) 6026 { 6027 gro_result_t ret; 6028 struct sk_buff *skb = napi_frags_skb(napi); 6029 6030 if (!skb) 6031 return GRO_DROP; 6032 6033 trace_napi_gro_frags_entry(skb); 6034 6035 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 6036 trace_napi_gro_frags_exit(ret); 6037 6038 return ret; 6039 } 6040 EXPORT_SYMBOL(napi_gro_frags); 6041 6042 /* Compute the checksum from gro_offset and return the folded value 6043 * after adding in any pseudo checksum. 6044 */ 6045 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) 6046 { 6047 __wsum wsum; 6048 __sum16 sum; 6049 6050 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); 6051 6052 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ 6053 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); 6054 /* See comments in __skb_checksum_complete(). 
*/ 6055 if (likely(!sum)) { 6056 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 6057 !skb->csum_complete_sw) 6058 netdev_rx_csum_fault(skb->dev, skb); 6059 } 6060 6061 NAPI_GRO_CB(skb)->csum = wsum; 6062 NAPI_GRO_CB(skb)->csum_valid = 1; 6063 6064 return sum; 6065 } 6066 EXPORT_SYMBOL(__skb_gro_checksum_complete); 6067 6068 static void net_rps_send_ipi(struct softnet_data *remsd) 6069 { 6070 #ifdef CONFIG_RPS 6071 while (remsd) { 6072 struct softnet_data *next = remsd->rps_ipi_next; 6073 6074 if (cpu_online(remsd->cpu)) 6075 smp_call_function_single_async(remsd->cpu, &remsd->csd); 6076 remsd = next; 6077 } 6078 #endif 6079 } 6080 6081 /* 6082 * net_rps_action_and_irq_enable sends any pending IPIs for RPS. 6083 * Note: called with local irq disabled, but exits with local irq enabled. 6084 */ 6085 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 6086 { 6087 #ifdef CONFIG_RPS 6088 struct softnet_data *remsd = sd->rps_ipi_list; 6089 6090 if (remsd) { 6091 sd->rps_ipi_list = NULL; 6092 6093 local_irq_enable(); 6094 6095 /* Send pending IPIs to kick RPS processing on remote cpus. */ 6096 net_rps_send_ipi(remsd); 6097 } else 6098 #endif 6099 local_irq_enable(); 6100 } 6101 6102 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) 6103 { 6104 #ifdef CONFIG_RPS 6105 return sd->rps_ipi_list != NULL; 6106 #else 6107 return false; 6108 #endif 6109 } 6110 6111 static int process_backlog(struct napi_struct *napi, int quota) 6112 { 6113 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 6114 bool again = true; 6115 int work = 0; 6116 6117 /* If we have pending IPIs, it is better to send them now 6118 * than to wait for net_rx_action() to end. 6119 */ 6120 if (sd_has_rps_ipi_waiting(sd)) { 6121 local_irq_disable(); 6122 net_rps_action_and_irq_enable(sd); 6123 } 6124 6125 napi->weight = dev_rx_weight; 6126 while (again) { 6127 struct sk_buff *skb; 6128 6129 while ((skb = __skb_dequeue(&sd->process_queue))) { 6130 rcu_read_lock(); 6131 __netif_receive_skb(skb); 6132 rcu_read_unlock(); 6133 input_queue_head_incr(sd); 6134 if (++work >= quota) 6135 return work; 6136 6137 } 6138 6139 local_irq_disable(); 6140 rps_lock(sd); 6141 if (skb_queue_empty(&sd->input_pkt_queue)) { 6142 /* 6143 * Inline a custom version of __napi_complete(). 6144 * Only the current cpu owns and manipulates this napi, 6145 * and NAPI_STATE_SCHED is the only possible flag set 6146 * on backlog. 6147 * We can use a plain write instead of clear_bit(), 6148 * and we don't need an smp_mb() memory barrier. 6149 */ 6150 napi->state = 0; 6151 again = false; 6152 } else { 6153 skb_queue_splice_tail_init(&sd->input_pkt_queue, 6154 &sd->process_queue); 6155 } 6156 rps_unlock(sd); 6157 local_irq_enable(); 6158 } 6159 6160 return work; 6161 } 6162 6163 /** 6164 * __napi_schedule - schedule for receive 6165 * @n: entry to schedule 6166 * 6167 * The entry's receive function will be scheduled to run. 6168 * Consider using __napi_schedule_irqoff() if hard irqs are masked. 6169 */ 6170 void __napi_schedule(struct napi_struct *n) 6171 { 6172 unsigned long flags; 6173 6174 local_irq_save(flags); 6175 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6176 local_irq_restore(flags); 6177 } 6178 EXPORT_SYMBOL(__napi_schedule); 6179 6180 /** 6181 * napi_schedule_prep - check if napi can be scheduled 6182 * @n: napi context 6183 * 6184 * Test if NAPI routine is already running, and if not mark 6185 * it as running. This is used as a condition variable to 6186 * ensure only one NAPI poll instance runs.
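 * (Typical caller pattern, as wrapped by napi_schedule():
 *
 *	if (napi_schedule_prep(n))
 *		__napi_schedule(n);
 *
 * so only the winner of the SCHED bit actually schedules the poll.)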
We also make 6187 * sure there is no pending NAPI disable. 6188 */ 6189 bool napi_schedule_prep(struct napi_struct *n) 6190 { 6191 unsigned long val, new; 6192 6193 do { 6194 val = READ_ONCE(n->state); 6195 if (unlikely(val & NAPIF_STATE_DISABLE)) 6196 return false; 6197 new = val | NAPIF_STATE_SCHED; 6198 6199 /* Sets STATE_MISSED bit if STATE_SCHED was already set 6200 * This was suggested by Alexander Duyck, as compiler 6201 * emits better code than : 6202 * if (val & NAPIF_STATE_SCHED) 6203 * new |= NAPIF_STATE_MISSED; 6204 */ 6205 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED * 6206 NAPIF_STATE_MISSED; 6207 } while (cmpxchg(&n->state, val, new) != val); 6208 6209 return !(val & NAPIF_STATE_SCHED); 6210 } 6211 EXPORT_SYMBOL(napi_schedule_prep); 6212 6213 /** 6214 * __napi_schedule_irqoff - schedule for receive 6215 * @n: entry to schedule 6216 * 6217 * Variant of __napi_schedule() assuming hard irqs are masked 6218 */ 6219 void __napi_schedule_irqoff(struct napi_struct *n) 6220 { 6221 ____napi_schedule(this_cpu_ptr(&softnet_data), n); 6222 } 6223 EXPORT_SYMBOL(__napi_schedule_irqoff); 6224 6225 bool napi_complete_done(struct napi_struct *n, int work_done) 6226 { 6227 unsigned long flags, val, new; 6228 6229 /* 6230 * 1) Don't let napi dequeue from the cpu poll list 6231 * just in case its running on a different cpu. 6232 * 2) If we are busy polling, do nothing here, we have 6233 * the guarantee we will be called later. 6234 */ 6235 if (unlikely(n->state & (NAPIF_STATE_NPSVC | 6236 NAPIF_STATE_IN_BUSY_POLL))) 6237 return false; 6238 6239 if (n->gro_bitmask) { 6240 unsigned long timeout = 0; 6241 6242 if (work_done) 6243 timeout = n->dev->gro_flush_timeout; 6244 6245 /* When the NAPI instance uses a timeout and keeps postponing 6246 * it, we need to bound somehow the time packets are kept in 6247 * the GRO layer 6248 */ 6249 napi_gro_flush(n, !!timeout); 6250 if (timeout) 6251 hrtimer_start(&n->timer, ns_to_ktime(timeout), 6252 HRTIMER_MODE_REL_PINNED); 6253 } 6254 6255 gro_normal_list(n); 6256 6257 if (unlikely(!list_empty(&n->poll_list))) { 6258 /* If n->poll_list is not empty, we need to mask irqs */ 6259 local_irq_save(flags); 6260 list_del_init(&n->poll_list); 6261 local_irq_restore(flags); 6262 } 6263 6264 do { 6265 val = READ_ONCE(n->state); 6266 6267 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6268 6269 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); 6270 6271 /* If STATE_MISSED was set, leave STATE_SCHED set, 6272 * because we will call napi->poll() one more time. 6273 * This C code was suggested by Alexander Duyck to help gcc. 
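 *
 * Worked out: (val & NAPIF_STATE_MISSED) is 0 or NAPIF_STATE_MISSED;
 * dividing by NAPIF_STATE_MISSED maps that to 0 or 1, and the
 * multiply turns it into 0 or NAPIF_STATE_SCHED. SCHED is therefore
 * kept exactly when MISSED was set, without a conditional branch.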
6274 */ 6275 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED * 6276 NAPIF_STATE_SCHED; 6277 } while (cmpxchg(&n->state, val, new) != val); 6278 6279 if (unlikely(val & NAPIF_STATE_MISSED)) { 6280 __napi_schedule(n); 6281 return false; 6282 } 6283 6284 return true; 6285 } 6286 EXPORT_SYMBOL(napi_complete_done); 6287 6288 /* must be called under rcu_read_lock(), as we dont take a reference */ 6289 static struct napi_struct *napi_by_id(unsigned int napi_id) 6290 { 6291 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 6292 struct napi_struct *napi; 6293 6294 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 6295 if (napi->napi_id == napi_id) 6296 return napi; 6297 6298 return NULL; 6299 } 6300 6301 #if defined(CONFIG_NET_RX_BUSY_POLL) 6302 6303 #define BUSY_POLL_BUDGET 8 6304 6305 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) 6306 { 6307 int rc; 6308 6309 /* Busy polling means there is a high chance device driver hard irq 6310 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was 6311 * set in napi_schedule_prep(). 6312 * Since we are about to call napi->poll() once more, we can safely 6313 * clear NAPI_STATE_MISSED. 6314 * 6315 * Note: x86 could use a single "lock and ..." instruction 6316 * to perform these two clear_bit() 6317 */ 6318 clear_bit(NAPI_STATE_MISSED, &napi->state); 6319 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); 6320 6321 local_bh_disable(); 6322 6323 /* All we really want here is to re-enable device interrupts. 6324 * Ideally, a new ndo_busy_poll_stop() could avoid another round. 6325 */ 6326 rc = napi->poll(napi, BUSY_POLL_BUDGET); 6327 /* We can't gro_normal_list() here, because napi->poll() might have 6328 * rearmed the napi (napi_complete_done()) in which case it could 6329 * already be running on another CPU. 6330 */ 6331 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); 6332 netpoll_poll_unlock(have_poll_lock); 6333 if (rc == BUSY_POLL_BUDGET) { 6334 /* As the whole budget was spent, we still own the napi so can 6335 * safely handle the rx_list. 6336 */ 6337 gro_normal_list(napi); 6338 __napi_schedule(napi); 6339 } 6340 local_bh_enable(); 6341 } 6342 6343 void napi_busy_loop(unsigned int napi_id, 6344 bool (*loop_end)(void *, unsigned long), 6345 void *loop_end_arg) 6346 { 6347 unsigned long start_time = loop_end ? busy_loop_current_time() : 0; 6348 int (*napi_poll)(struct napi_struct *napi, int budget); 6349 void *have_poll_lock = NULL; 6350 struct napi_struct *napi; 6351 6352 restart: 6353 napi_poll = NULL; 6354 6355 rcu_read_lock(); 6356 6357 napi = napi_by_id(napi_id); 6358 if (!napi) 6359 goto out; 6360 6361 preempt_disable(); 6362 for (;;) { 6363 int work = 0; 6364 6365 local_bh_disable(); 6366 if (!napi_poll) { 6367 unsigned long val = READ_ONCE(napi->state); 6368 6369 /* If multiple threads are competing for this napi, 6370 * we avoid dirtying napi->state as much as we can. 
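 * The plain READ_ONCE() test lets losing threads bail out through the
 * cheap checks below instead of issuing a failing cmpxchg(), which
 * would still bounce the cache line between the competing CPUs.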
6371 */ 6372 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED | 6373 NAPIF_STATE_IN_BUSY_POLL)) 6374 goto count; 6375 if (cmpxchg(&napi->state, val, 6376 val | NAPIF_STATE_IN_BUSY_POLL | 6377 NAPIF_STATE_SCHED) != val) 6378 goto count; 6379 have_poll_lock = netpoll_poll_lock(napi); 6380 napi_poll = napi->poll; 6381 } 6382 work = napi_poll(napi, BUSY_POLL_BUDGET); 6383 trace_napi_poll(napi, work, BUSY_POLL_BUDGET); 6384 gro_normal_list(napi); 6385 count: 6386 if (work > 0) 6387 __NET_ADD_STATS(dev_net(napi->dev), 6388 LINUX_MIB_BUSYPOLLRXPACKETS, work); 6389 local_bh_enable(); 6390 6391 if (!loop_end || loop_end(loop_end_arg, start_time)) 6392 break; 6393 6394 if (unlikely(need_resched())) { 6395 if (napi_poll) 6396 busy_poll_stop(napi, have_poll_lock); 6397 preempt_enable(); 6398 rcu_read_unlock(); 6399 cond_resched(); 6400 if (loop_end(loop_end_arg, start_time)) 6401 return; 6402 goto restart; 6403 } 6404 cpu_relax(); 6405 } 6406 if (napi_poll) 6407 busy_poll_stop(napi, have_poll_lock); 6408 preempt_enable(); 6409 out: 6410 rcu_read_unlock(); 6411 } 6412 EXPORT_SYMBOL(napi_busy_loop); 6413 6414 #endif /* CONFIG_NET_RX_BUSY_POLL */ 6415 6416 static void napi_hash_add(struct napi_struct *napi) 6417 { 6418 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) || 6419 test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) 6420 return; 6421 6422 spin_lock(&napi_hash_lock); 6423 6424 /* 0..NR_CPUS range is reserved for sender_cpu use */ 6425 do { 6426 if (unlikely(++napi_gen_id < MIN_NAPI_ID)) 6427 napi_gen_id = MIN_NAPI_ID; 6428 } while (napi_by_id(napi_gen_id)); 6429 napi->napi_id = napi_gen_id; 6430 6431 hlist_add_head_rcu(&napi->napi_hash_node, 6432 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6433 6434 spin_unlock(&napi_hash_lock); 6435 } 6436 6437 /* Warning : caller is responsible to make sure rcu grace period 6438 * is respected before freeing memory containing @napi 6439 */ 6440 bool napi_hash_del(struct napi_struct *napi) 6441 { 6442 bool rcu_sync_needed = false; 6443 6444 spin_lock(&napi_hash_lock); 6445 6446 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) { 6447 rcu_sync_needed = true; 6448 hlist_del_rcu(&napi->napi_hash_node); 6449 } 6450 spin_unlock(&napi_hash_lock); 6451 return rcu_sync_needed; 6452 } 6453 EXPORT_SYMBOL_GPL(napi_hash_del); 6454 6455 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) 6456 { 6457 struct napi_struct *napi; 6458 6459 napi = container_of(timer, struct napi_struct, timer); 6460 6461 /* Note : we use a relaxed variant of napi_schedule_prep() not setting 6462 * NAPI_STATE_MISSED, since we do not react to a device IRQ. 
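 * If the test_and_set_bit() below loses the race, NAPI is already
 * scheduled and that poll will flush the held GRO packets for us, so
 * losing here is harmless.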
6463 */ 6464 if (napi->gro_bitmask && !napi_disable_pending(napi) && 6465 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) 6466 __napi_schedule_irqoff(napi); 6467 6468 return HRTIMER_NORESTART; 6469 } 6470 6471 static void init_gro_hash(struct napi_struct *napi) 6472 { 6473 int i; 6474 6475 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6476 INIT_LIST_HEAD(&napi->gro_hash[i].list); 6477 napi->gro_hash[i].count = 0; 6478 } 6479 napi->gro_bitmask = 0; 6480 } 6481 6482 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 6483 int (*poll)(struct napi_struct *, int), int weight) 6484 { 6485 INIT_LIST_HEAD(&napi->poll_list); 6486 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 6487 napi->timer.function = napi_watchdog; 6488 init_gro_hash(napi); 6489 napi->skb = NULL; 6490 INIT_LIST_HEAD(&napi->rx_list); 6491 napi->rx_count = 0; 6492 napi->poll = poll; 6493 if (weight > NAPI_POLL_WEIGHT) 6494 netdev_err_once(dev, "%s() called with weight %d\n", __func__, 6495 weight); 6496 napi->weight = weight; 6497 list_add(&napi->dev_list, &dev->napi_list); 6498 napi->dev = dev; 6499 #ifdef CONFIG_NETPOLL 6500 napi->poll_owner = -1; 6501 #endif 6502 set_bit(NAPI_STATE_SCHED, &napi->state); 6503 napi_hash_add(napi); 6504 } 6505 EXPORT_SYMBOL(netif_napi_add); 6506 6507 void napi_disable(struct napi_struct *n) 6508 { 6509 might_sleep(); 6510 set_bit(NAPI_STATE_DISABLE, &n->state); 6511 6512 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 6513 msleep(1); 6514 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) 6515 msleep(1); 6516 6517 hrtimer_cancel(&n->timer); 6518 6519 clear_bit(NAPI_STATE_DISABLE, &n->state); 6520 } 6521 EXPORT_SYMBOL(napi_disable); 6522 6523 static void flush_gro_hash(struct napi_struct *napi) 6524 { 6525 int i; 6526 6527 for (i = 0; i < GRO_HASH_BUCKETS; i++) { 6528 struct sk_buff *skb, *n; 6529 6530 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) 6531 kfree_skb(skb); 6532 napi->gro_hash[i].count = 0; 6533 } 6534 } 6535 6536 /* Must be called in process context */ 6537 void netif_napi_del(struct napi_struct *napi) 6538 { 6539 might_sleep(); 6540 if (napi_hash_del(napi)) 6541 synchronize_net(); 6542 list_del_init(&napi->dev_list); 6543 napi_free_frags(napi); 6544 6545 flush_gro_hash(napi); 6546 napi->gro_bitmask = 0; 6547 } 6548 EXPORT_SYMBOL(netif_napi_del); 6549 6550 static int napi_poll(struct napi_struct *n, struct list_head *repoll) 6551 { 6552 void *have; 6553 int work, weight; 6554 6555 list_del_init(&n->poll_list); 6556 6557 have = netpoll_poll_lock(n); 6558 6559 weight = n->weight; 6560 6561 /* This NAPI_STATE_SCHED test is for avoiding a race 6562 * with netpoll's poll_napi(). Only the entity which 6563 * obtains the lock and sees NAPI_STATE_SCHED set will 6564 * actually make the ->poll() call. Therefore we avoid 6565 * accidentally calling ->poll() when NAPI is not scheduled. 6566 */ 6567 work = 0; 6568 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 6569 work = n->poll(n, weight); 6570 trace_napi_poll(n, work, weight); 6571 } 6572 6573 WARN_ON_ONCE(work > weight); 6574 6575 if (likely(work < weight)) 6576 goto out_unlock; 6577 6578 /* Drivers must not modify the NAPI state if they 6579 * consume the entire weight. In such cases this code 6580 * still "owns" the NAPI instance and therefore can 6581 * move the instance around on the list at-will. 
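 * (Driver-side contract, sketched for illustration: a poll() callback
 * that consumed the whole budget returns 'weight' and leaves NAPI
 * scheduled, while one that finished early is expected to call
 * napi_complete_done() and return work < weight.)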
6582 */ 6583 if (unlikely(napi_disable_pending(n))) { 6584 napi_complete(n); 6585 goto out_unlock; 6586 } 6587 6588 if (n->gro_bitmask) { 6589 /* flush too old packets 6590 * If HZ < 1000, flush all packets. 6591 */ 6592 napi_gro_flush(n, HZ >= 1000); 6593 } 6594 6595 gro_normal_list(n); 6596 6597 /* Some drivers may have called napi_schedule 6598 * prior to exhausting their budget. 6599 */ 6600 if (unlikely(!list_empty(&n->poll_list))) { 6601 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 6602 n->dev ? n->dev->name : "backlog"); 6603 goto out_unlock; 6604 } 6605 6606 list_add_tail(&n->poll_list, repoll); 6607 6608 out_unlock: 6609 netpoll_poll_unlock(have); 6610 6611 return work; 6612 } 6613 6614 static __latent_entropy void net_rx_action(struct softirq_action *h) 6615 { 6616 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 6617 unsigned long time_limit = jiffies + 6618 usecs_to_jiffies(netdev_budget_usecs); 6619 int budget = netdev_budget; 6620 LIST_HEAD(list); 6621 LIST_HEAD(repoll); 6622 6623 local_irq_disable(); 6624 list_splice_init(&sd->poll_list, &list); 6625 local_irq_enable(); 6626 6627 for (;;) { 6628 struct napi_struct *n; 6629 6630 if (list_empty(&list)) { 6631 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 6632 goto out; 6633 break; 6634 } 6635 6636 n = list_first_entry(&list, struct napi_struct, poll_list); 6637 budget -= napi_poll(n, &repoll); 6638 6639 /* If softirq window is exhausted then punt. 6640 * Allow this to run for 2 jiffies since which will allow 6641 * an average latency of 1.5/HZ. 6642 */ 6643 if (unlikely(budget <= 0 || 6644 time_after_eq(jiffies, time_limit))) { 6645 sd->time_squeeze++; 6646 break; 6647 } 6648 } 6649 6650 local_irq_disable(); 6651 6652 list_splice_tail_init(&sd->poll_list, &list); 6653 list_splice_tail(&repoll, &list); 6654 list_splice(&list, &sd->poll_list); 6655 if (!list_empty(&sd->poll_list)) 6656 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 6657 6658 net_rps_action_and_irq_enable(sd); 6659 out: 6660 __kfree_skb_flush(); 6661 } 6662 6663 struct netdev_adjacent { 6664 struct net_device *dev; 6665 6666 /* upper master flag, there can only be one master device per list */ 6667 bool master; 6668 6669 /* lookup ignore flag */ 6670 bool ignore; 6671 6672 /* counter for the number of times this device was added to us */ 6673 u16 ref_nr; 6674 6675 /* private field for the users */ 6676 void *private; 6677 6678 struct list_head list; 6679 struct rcu_head rcu; 6680 }; 6681 6682 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev, 6683 struct list_head *adj_list) 6684 { 6685 struct netdev_adjacent *adj; 6686 6687 list_for_each_entry(adj, adj_list, list) { 6688 if (adj->dev == adj_dev) 6689 return adj; 6690 } 6691 return NULL; 6692 } 6693 6694 static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data) 6695 { 6696 struct net_device *dev = data; 6697 6698 return upper_dev == dev; 6699 } 6700 6701 /** 6702 * netdev_has_upper_dev - Check if device is linked to an upper device 6703 * @dev: device 6704 * @upper_dev: upper device to check 6705 * 6706 * Find out if a device is linked to specified upper device and return true 6707 * in case it is. Note that this checks only immediate upper device, 6708 * not through a complete stack of devices. The caller must hold the RTNL lock. 
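 *
 * Example (illustrative; port_dev and bond_dev are hypothetical):
 *
 *	if (netdev_has_upper_dev(port_dev, bond_dev))
 *		pr_debug("%s is stacked below %s\n",
 *			 port_dev->name, bond_dev->name);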
6709 */ 6710 bool netdev_has_upper_dev(struct net_device *dev, 6711 struct net_device *upper_dev) 6712 { 6713 ASSERT_RTNL(); 6714 6715 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6716 upper_dev); 6717 } 6718 EXPORT_SYMBOL(netdev_has_upper_dev); 6719 6720 /** 6721 * netdev_has_upper_dev_all - Check if device is linked to an upper device 6722 * @dev: device 6723 * @upper_dev: upper device to check 6724 * 6725 * Find out if a device is linked to specified upper device and return true 6726 * in case it is. Note that this checks the entire upper device chain. 6727 * The caller must hold rcu lock. 6728 */ 6729 6730 bool netdev_has_upper_dev_all_rcu(struct net_device *dev, 6731 struct net_device *upper_dev) 6732 { 6733 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev, 6734 upper_dev); 6735 } 6736 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); 6737 6738 /** 6739 * netdev_has_any_upper_dev - Check if device is linked to some device 6740 * @dev: device 6741 * 6742 * Find out if a device is linked to an upper device and return true in case 6743 * it is. The caller must hold the RTNL lock. 6744 */ 6745 bool netdev_has_any_upper_dev(struct net_device *dev) 6746 { 6747 ASSERT_RTNL(); 6748 6749 return !list_empty(&dev->adj_list.upper); 6750 } 6751 EXPORT_SYMBOL(netdev_has_any_upper_dev); 6752 6753 /** 6754 * netdev_master_upper_dev_get - Get master upper device 6755 * @dev: device 6756 * 6757 * Find a master upper device and return pointer to it or NULL in case 6758 * it's not there. The caller must hold the RTNL lock. 6759 */ 6760 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 6761 { 6762 struct netdev_adjacent *upper; 6763 6764 ASSERT_RTNL(); 6765 6766 if (list_empty(&dev->adj_list.upper)) 6767 return NULL; 6768 6769 upper = list_first_entry(&dev->adj_list.upper, 6770 struct netdev_adjacent, list); 6771 if (likely(upper->master)) 6772 return upper->dev; 6773 return NULL; 6774 } 6775 EXPORT_SYMBOL(netdev_master_upper_dev_get); 6776 6777 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev) 6778 { 6779 struct netdev_adjacent *upper; 6780 6781 ASSERT_RTNL(); 6782 6783 if (list_empty(&dev->adj_list.upper)) 6784 return NULL; 6785 6786 upper = list_first_entry(&dev->adj_list.upper, 6787 struct netdev_adjacent, list); 6788 if (likely(upper->master) && !upper->ignore) 6789 return upper->dev; 6790 return NULL; 6791 } 6792 6793 /** 6794 * netdev_has_any_lower_dev - Check if device is linked to some device 6795 * @dev: device 6796 * 6797 * Find out if a device is linked to a lower device and return true in case 6798 * it is. The caller must hold the RTNL lock. 6799 */ 6800 static bool netdev_has_any_lower_dev(struct net_device *dev) 6801 { 6802 ASSERT_RTNL(); 6803 6804 return !list_empty(&dev->adj_list.lower); 6805 } 6806 6807 void *netdev_adjacent_get_private(struct list_head *adj_list) 6808 { 6809 struct netdev_adjacent *adj; 6810 6811 adj = list_entry(adj_list, struct netdev_adjacent, list); 6812 6813 return adj->private; 6814 } 6815 EXPORT_SYMBOL(netdev_adjacent_get_private); 6816 6817 /** 6818 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 6819 * @dev: device 6820 * @iter: list_head ** of the current position 6821 * 6822 * Gets the next device from the dev's upper list, starting from iter 6823 * position. The caller must hold RCU read lock. 
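 *
 * Most callers use it through the netdev_for_each_upper_dev_rcu()
 * helper, e.g.:
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		pr_debug("upper: %s\n", upper->name);
 *	rcu_read_unlock();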
6824 */ 6825 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 6826 struct list_head **iter) 6827 { 6828 struct netdev_adjacent *upper; 6829 6830 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6831 6832 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6833 6834 if (&upper->list == &dev->adj_list.upper) 6835 return NULL; 6836 6837 *iter = &upper->list; 6838 6839 return upper->dev; 6840 } 6841 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 6842 6843 static struct net_device *__netdev_next_upper_dev(struct net_device *dev, 6844 struct list_head **iter, 6845 bool *ignore) 6846 { 6847 struct netdev_adjacent *upper; 6848 6849 upper = list_entry((*iter)->next, struct netdev_adjacent, list); 6850 6851 if (&upper->list == &dev->adj_list.upper) 6852 return NULL; 6853 6854 *iter = &upper->list; 6855 *ignore = upper->ignore; 6856 6857 return upper->dev; 6858 } 6859 6860 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev, 6861 struct list_head **iter) 6862 { 6863 struct netdev_adjacent *upper; 6864 6865 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 6866 6867 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 6868 6869 if (&upper->list == &dev->adj_list.upper) 6870 return NULL; 6871 6872 *iter = &upper->list; 6873 6874 return upper->dev; 6875 } 6876 6877 static int __netdev_walk_all_upper_dev(struct net_device *dev, 6878 int (*fn)(struct net_device *dev, 6879 void *data), 6880 void *data) 6881 { 6882 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6883 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6884 int ret, cur = 0; 6885 bool ignore; 6886 6887 now = dev; 6888 iter = &dev->adj_list.upper; 6889 6890 while (1) { 6891 if (now != dev) { 6892 ret = fn(now, data); 6893 if (ret) 6894 return ret; 6895 } 6896 6897 next = NULL; 6898 while (1) { 6899 udev = __netdev_next_upper_dev(now, &iter, &ignore); 6900 if (!udev) 6901 break; 6902 if (ignore) 6903 continue; 6904 6905 next = udev; 6906 niter = &udev->adj_list.upper; 6907 dev_stack[cur] = now; 6908 iter_stack[cur++] = iter; 6909 break; 6910 } 6911 6912 if (!next) { 6913 if (!cur) 6914 return 0; 6915 next = dev_stack[--cur]; 6916 niter = iter_stack[cur]; 6917 } 6918 6919 now = next; 6920 iter = niter; 6921 } 6922 6923 return 0; 6924 } 6925 6926 int netdev_walk_all_upper_dev_rcu(struct net_device *dev, 6927 int (*fn)(struct net_device *dev, 6928 void *data), 6929 void *data) 6930 { 6931 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 6932 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 6933 int ret, cur = 0; 6934 6935 now = dev; 6936 iter = &dev->adj_list.upper; 6937 6938 while (1) { 6939 if (now != dev) { 6940 ret = fn(now, data); 6941 if (ret) 6942 return ret; 6943 } 6944 6945 next = NULL; 6946 while (1) { 6947 udev = netdev_next_upper_dev_rcu(now, &iter); 6948 if (!udev) 6949 break; 6950 6951 next = udev; 6952 niter = &udev->adj_list.upper; 6953 dev_stack[cur] = now; 6954 iter_stack[cur++] = iter; 6955 break; 6956 } 6957 6958 if (!next) { 6959 if (!cur) 6960 return 0; 6961 next = dev_stack[--cur]; 6962 niter = iter_stack[cur]; 6963 } 6964 6965 now = next; 6966 iter = niter; 6967 } 6968 6969 return 0; 6970 } 6971 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu); 6972 6973 static bool __netdev_has_upper_dev(struct net_device *dev, 6974 struct net_device *upper_dev) 6975 { 6976 ASSERT_RTNL(); 6977 6978 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev, 6979 
upper_dev); 6980 } 6981 6982 /** 6983 * netdev_lower_get_next_private - Get the next ->private from the 6984 * lower neighbour list 6985 * @dev: device 6986 * @iter: list_head ** of the current position 6987 * 6988 * Gets the next netdev_adjacent->private from the dev's lower neighbour 6989 * list, starting from iter position. The caller must hold either hold the 6990 * RTNL lock or its own locking that guarantees that the neighbour lower 6991 * list will remain unchanged. 6992 */ 6993 void *netdev_lower_get_next_private(struct net_device *dev, 6994 struct list_head **iter) 6995 { 6996 struct netdev_adjacent *lower; 6997 6998 lower = list_entry(*iter, struct netdev_adjacent, list); 6999 7000 if (&lower->list == &dev->adj_list.lower) 7001 return NULL; 7002 7003 *iter = lower->list.next; 7004 7005 return lower->private; 7006 } 7007 EXPORT_SYMBOL(netdev_lower_get_next_private); 7008 7009 /** 7010 * netdev_lower_get_next_private_rcu - Get the next ->private from the 7011 * lower neighbour list, RCU 7012 * variant 7013 * @dev: device 7014 * @iter: list_head ** of the current position 7015 * 7016 * Gets the next netdev_adjacent->private from the dev's lower neighbour 7017 * list, starting from iter position. The caller must hold RCU read lock. 7018 */ 7019 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 7020 struct list_head **iter) 7021 { 7022 struct netdev_adjacent *lower; 7023 7024 WARN_ON_ONCE(!rcu_read_lock_held()); 7025 7026 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7027 7028 if (&lower->list == &dev->adj_list.lower) 7029 return NULL; 7030 7031 *iter = &lower->list; 7032 7033 return lower->private; 7034 } 7035 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 7036 7037 /** 7038 * netdev_lower_get_next - Get the next device from the lower neighbour 7039 * list 7040 * @dev: device 7041 * @iter: list_head ** of the current position 7042 * 7043 * Gets the next netdev_adjacent from the dev's lower neighbour 7044 * list, starting from iter position. The caller must hold RTNL lock or 7045 * its own locking that guarantees that the neighbour lower 7046 * list will remain unchanged. 
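 *
 * Typically used through the netdev_for_each_lower_dev() helper, e.g.:
 *
 *	struct net_device *ldev;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(dev, ldev, iter)
 *		pr_debug("lower: %s\n", ldev->name);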
7047 */ 7048 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 7049 { 7050 struct netdev_adjacent *lower; 7051 7052 lower = list_entry(*iter, struct netdev_adjacent, list); 7053 7054 if (&lower->list == &dev->adj_list.lower) 7055 return NULL; 7056 7057 *iter = lower->list.next; 7058 7059 return lower->dev; 7060 } 7061 EXPORT_SYMBOL(netdev_lower_get_next); 7062 7063 static struct net_device *netdev_next_lower_dev(struct net_device *dev, 7064 struct list_head **iter) 7065 { 7066 struct netdev_adjacent *lower; 7067 7068 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7069 7070 if (&lower->list == &dev->adj_list.lower) 7071 return NULL; 7072 7073 *iter = &lower->list; 7074 7075 return lower->dev; 7076 } 7077 7078 static struct net_device *__netdev_next_lower_dev(struct net_device *dev, 7079 struct list_head **iter, 7080 bool *ignore) 7081 { 7082 struct netdev_adjacent *lower; 7083 7084 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 7085 7086 if (&lower->list == &dev->adj_list.lower) 7087 return NULL; 7088 7089 *iter = &lower->list; 7090 *ignore = lower->ignore; 7091 7092 return lower->dev; 7093 } 7094 7095 int netdev_walk_all_lower_dev(struct net_device *dev, 7096 int (*fn)(struct net_device *dev, 7097 void *data), 7098 void *data) 7099 { 7100 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7101 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7102 int ret, cur = 0; 7103 7104 now = dev; 7105 iter = &dev->adj_list.lower; 7106 7107 while (1) { 7108 if (now != dev) { 7109 ret = fn(now, data); 7110 if (ret) 7111 return ret; 7112 } 7113 7114 next = NULL; 7115 while (1) { 7116 ldev = netdev_next_lower_dev(now, &iter); 7117 if (!ldev) 7118 break; 7119 7120 next = ldev; 7121 niter = &ldev->adj_list.lower; 7122 dev_stack[cur] = now; 7123 iter_stack[cur++] = iter; 7124 break; 7125 } 7126 7127 if (!next) { 7128 if (!cur) 7129 return 0; 7130 next = dev_stack[--cur]; 7131 niter = iter_stack[cur]; 7132 } 7133 7134 now = next; 7135 iter = niter; 7136 } 7137 7138 return 0; 7139 } 7140 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev); 7141 7142 static int __netdev_walk_all_lower_dev(struct net_device *dev, 7143 int (*fn)(struct net_device *dev, 7144 void *data), 7145 void *data) 7146 { 7147 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7148 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7149 int ret, cur = 0; 7150 bool ignore; 7151 7152 now = dev; 7153 iter = &dev->adj_list.lower; 7154 7155 while (1) { 7156 if (now != dev) { 7157 ret = fn(now, data); 7158 if (ret) 7159 return ret; 7160 } 7161 7162 next = NULL; 7163 while (1) { 7164 ldev = __netdev_next_lower_dev(now, &iter, &ignore); 7165 if (!ldev) 7166 break; 7167 if (ignore) 7168 continue; 7169 7170 next = ldev; 7171 niter = &ldev->adj_list.lower; 7172 dev_stack[cur] = now; 7173 iter_stack[cur++] = iter; 7174 break; 7175 } 7176 7177 if (!next) { 7178 if (!cur) 7179 return 0; 7180 next = dev_stack[--cur]; 7181 niter = iter_stack[cur]; 7182 } 7183 7184 now = next; 7185 iter = niter; 7186 } 7187 7188 return 0; 7189 } 7190 7191 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, 7192 struct list_head **iter) 7193 { 7194 struct netdev_adjacent *lower; 7195 7196 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 7197 if (&lower->list == &dev->adj_list.lower) 7198 return NULL; 7199 7200 *iter = &lower->list; 7201 7202 return lower->dev; 7203 } 7204 EXPORT_SYMBOL(netdev_next_lower_dev_rcu); 7205 7206 
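/* Nesting depth bookkeeping for the adjacency lists. As an illustration,
 * take a hypothetical stack with vlan0 on top of bond0 on top of eth0:
 *
 *	vlan0: upper_level == 1, lower_level == 3
 *	bond0: upper_level == 2, lower_level == 2
 *	eth0:  upper_level == 3, lower_level == 1
 *
 * A device with no (non-ignored) uppers has upper_level 1 and each
 * device below it is one deeper; the lower direction is symmetric.
 * __netdev_upper_dev_link() sums dev->lower_level and
 * upper_dev->upper_level to reject stacks deeper than MAX_NEST_DEV.
 */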
static u8 __netdev_upper_depth(struct net_device *dev) 7207 { 7208 struct net_device *udev; 7209 struct list_head *iter; 7210 u8 max_depth = 0; 7211 bool ignore; 7212 7213 for (iter = &dev->adj_list.upper, 7214 udev = __netdev_next_upper_dev(dev, &iter, &ignore); 7215 udev; 7216 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) { 7217 if (ignore) 7218 continue; 7219 if (max_depth < udev->upper_level) 7220 max_depth = udev->upper_level; 7221 } 7222 7223 return max_depth; 7224 } 7225 7226 static u8 __netdev_lower_depth(struct net_device *dev) 7227 { 7228 struct net_device *ldev; 7229 struct list_head *iter; 7230 u8 max_depth = 0; 7231 bool ignore; 7232 7233 for (iter = &dev->adj_list.lower, 7234 ldev = __netdev_next_lower_dev(dev, &iter, &ignore); 7235 ldev; 7236 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) { 7237 if (ignore) 7238 continue; 7239 if (max_depth < ldev->lower_level) 7240 max_depth = ldev->lower_level; 7241 } 7242 7243 return max_depth; 7244 } 7245 7246 static int __netdev_update_upper_level(struct net_device *dev, void *data) 7247 { 7248 dev->upper_level = __netdev_upper_depth(dev) + 1; 7249 return 0; 7250 } 7251 7252 static int __netdev_update_lower_level(struct net_device *dev, void *data) 7253 { 7254 dev->lower_level = __netdev_lower_depth(dev) + 1; 7255 return 0; 7256 } 7257 7258 int netdev_walk_all_lower_dev_rcu(struct net_device *dev, 7259 int (*fn)(struct net_device *dev, 7260 void *data), 7261 void *data) 7262 { 7263 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1]; 7264 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1]; 7265 int ret, cur = 0; 7266 7267 now = dev; 7268 iter = &dev->adj_list.lower; 7269 7270 while (1) { 7271 if (now != dev) { 7272 ret = fn(now, data); 7273 if (ret) 7274 return ret; 7275 } 7276 7277 next = NULL; 7278 while (1) { 7279 ldev = netdev_next_lower_dev_rcu(now, &iter); 7280 if (!ldev) 7281 break; 7282 7283 next = ldev; 7284 niter = &ldev->adj_list.lower; 7285 dev_stack[cur] = now; 7286 iter_stack[cur++] = iter; 7287 break; 7288 } 7289 7290 if (!next) { 7291 if (!cur) 7292 return 0; 7293 next = dev_stack[--cur]; 7294 niter = iter_stack[cur]; 7295 } 7296 7297 now = next; 7298 iter = niter; 7299 } 7300 7301 return 0; 7302 } 7303 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu); 7304 7305 /** 7306 * netdev_lower_get_first_private_rcu - Get the first ->private from the 7307 * lower neighbour list, RCU 7308 * variant 7309 * @dev: device 7310 * 7311 * Gets the first netdev_adjacent->private from the dev's lower neighbour 7312 * list. The caller must hold RCU read lock. 7313 */ 7314 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 7315 { 7316 struct netdev_adjacent *lower; 7317 7318 lower = list_first_or_null_rcu(&dev->adj_list.lower, 7319 struct netdev_adjacent, list); 7320 if (lower) 7321 return lower->private; 7322 return NULL; 7323 } 7324 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 7325 7326 /** 7327 * netdev_master_upper_dev_get_rcu - Get master upper device 7328 * @dev: device 7329 * 7330 * Find a master upper device and return pointer to it or NULL in case 7331 * it's not there. The caller must hold the RCU read lock. 
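 *
 * Example (illustrative; slave_dev is hypothetical):
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(slave_dev);
 *	if (master)
 *		pr_debug("master: %s\n", master->name);
 *	rcu_read_unlock();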
7332 */ 7333 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 7334 { 7335 struct netdev_adjacent *upper; 7336 7337 upper = list_first_or_null_rcu(&dev->adj_list.upper, 7338 struct netdev_adjacent, list); 7339 if (upper && likely(upper->master)) 7340 return upper->dev; 7341 return NULL; 7342 } 7343 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 7344 7345 static int netdev_adjacent_sysfs_add(struct net_device *dev, 7346 struct net_device *adj_dev, 7347 struct list_head *dev_list) 7348 { 7349 char linkname[IFNAMSIZ+7]; 7350 7351 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7352 "upper_%s" : "lower_%s", adj_dev->name); 7353 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 7354 linkname); 7355 } 7356 static void netdev_adjacent_sysfs_del(struct net_device *dev, 7357 char *name, 7358 struct list_head *dev_list) 7359 { 7360 char linkname[IFNAMSIZ+7]; 7361 7362 sprintf(linkname, dev_list == &dev->adj_list.upper ? 7363 "upper_%s" : "lower_%s", name); 7364 sysfs_remove_link(&(dev->dev.kobj), linkname); 7365 } 7366 7367 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, 7368 struct net_device *adj_dev, 7369 struct list_head *dev_list) 7370 { 7371 return (dev_list == &dev->adj_list.upper || 7372 dev_list == &dev->adj_list.lower) && 7373 net_eq(dev_net(dev), dev_net(adj_dev)); 7374 } 7375 7376 static int __netdev_adjacent_dev_insert(struct net_device *dev, 7377 struct net_device *adj_dev, 7378 struct list_head *dev_list, 7379 void *private, bool master) 7380 { 7381 struct netdev_adjacent *adj; 7382 int ret; 7383 7384 adj = __netdev_find_adj(adj_dev, dev_list); 7385 7386 if (adj) { 7387 adj->ref_nr += 1; 7388 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n", 7389 dev->name, adj_dev->name, adj->ref_nr); 7390 7391 return 0; 7392 } 7393 7394 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 7395 if (!adj) 7396 return -ENOMEM; 7397 7398 adj->dev = adj_dev; 7399 adj->master = master; 7400 adj->ref_nr = 1; 7401 adj->private = private; 7402 adj->ignore = false; 7403 dev_hold(adj_dev); 7404 7405 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", 7406 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); 7407 7408 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { 7409 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 7410 if (ret) 7411 goto free_adj; 7412 } 7413 7414 /* Ensure that master link is always the first item in list. 
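 * netdev_master_upper_dev_get() relies on this ordering: it only
 * inspects list_first_entry() when looking up the master.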
*/ 7415 if (master) { 7416 ret = sysfs_create_link(&(dev->dev.kobj), 7417 &(adj_dev->dev.kobj), "master"); 7418 if (ret) 7419 goto remove_symlinks; 7420 7421 list_add_rcu(&adj->list, dev_list); 7422 } else { 7423 list_add_tail_rcu(&adj->list, dev_list); 7424 } 7425 7426 return 0; 7427 7428 remove_symlinks: 7429 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7430 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7431 free_adj: 7432 kfree(adj); 7433 dev_put(adj_dev); 7434 7435 return ret; 7436 } 7437 7438 static void __netdev_adjacent_dev_remove(struct net_device *dev, 7439 struct net_device *adj_dev, 7440 u16 ref_nr, 7441 struct list_head *dev_list) 7442 { 7443 struct netdev_adjacent *adj; 7444 7445 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n", 7446 dev->name, adj_dev->name, ref_nr); 7447 7448 adj = __netdev_find_adj(adj_dev, dev_list); 7449 7450 if (!adj) { 7451 pr_err("Adjacency does not exist for device %s from %s\n", 7452 dev->name, adj_dev->name); 7453 WARN_ON(1); 7454 return; 7455 } 7456 7457 if (adj->ref_nr > ref_nr) { 7458 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n", 7459 dev->name, adj_dev->name, ref_nr, 7460 adj->ref_nr - ref_nr); 7461 adj->ref_nr -= ref_nr; 7462 return; 7463 } 7464 7465 if (adj->master) 7466 sysfs_remove_link(&(dev->dev.kobj), "master"); 7467 7468 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) 7469 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 7470 7471 list_del_rcu(&adj->list); 7472 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", 7473 adj_dev->name, dev->name, adj_dev->name); 7474 dev_put(adj_dev); 7475 kfree_rcu(adj, rcu); 7476 } 7477 7478 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 7479 struct net_device *upper_dev, 7480 struct list_head *up_list, 7481 struct list_head *down_list, 7482 void *private, bool master) 7483 { 7484 int ret; 7485 7486 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, 7487 private, master); 7488 if (ret) 7489 return ret; 7490 7491 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, 7492 private, false); 7493 if (ret) { 7494 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list); 7495 return ret; 7496 } 7497 7498 return 0; 7499 } 7500 7501 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 7502 struct net_device *upper_dev, 7503 u16 ref_nr, 7504 struct list_head *up_list, 7505 struct list_head *down_list) 7506 { 7507 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list); 7508 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list); 7509 } 7510 7511 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 7512 struct net_device *upper_dev, 7513 void *private, bool master) 7514 { 7515 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 7516 &dev->adj_list.upper, 7517 &upper_dev->adj_list.lower, 7518 private, master); 7519 } 7520 7521 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 7522 struct net_device *upper_dev) 7523 { 7524 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1, 7525 &dev->adj_list.upper, 7526 &upper_dev->adj_list.lower); 7527 } 7528 7529 static int __netdev_upper_dev_link(struct net_device *dev, 7530 struct net_device *upper_dev, bool master, 7531 void *upper_priv, void *upper_info, 7532 struct netlink_ext_ack *extack) 7533 { 7534 struct netdev_notifier_changeupper_info changeupper_info = { 7535 .info = { 7536 .dev = dev, 7537 .extack = extack, 7538 }, 7539 .upper_dev = upper_dev, 7540 .master = master, 
7541 .linking = true, 7542 .upper_info = upper_info, 7543 }; 7544 struct net_device *master_dev; 7545 int ret = 0; 7546 7547 ASSERT_RTNL(); 7548 7549 if (dev == upper_dev) 7550 return -EBUSY; 7551 7552 /* To prevent loops, check if dev is not upper device to upper_dev. */ 7553 if (__netdev_has_upper_dev(upper_dev, dev)) 7554 return -EBUSY; 7555 7556 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV) 7557 return -EMLINK; 7558 7559 if (!master) { 7560 if (__netdev_has_upper_dev(dev, upper_dev)) 7561 return -EEXIST; 7562 } else { 7563 master_dev = __netdev_master_upper_dev_get(dev); 7564 if (master_dev) 7565 return master_dev == upper_dev ? -EEXIST : -EBUSY; 7566 } 7567 7568 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7569 &changeupper_info.info); 7570 ret = notifier_to_errno(ret); 7571 if (ret) 7572 return ret; 7573 7574 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv, 7575 master); 7576 if (ret) 7577 return ret; 7578 7579 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7580 &changeupper_info.info); 7581 ret = notifier_to_errno(ret); 7582 if (ret) 7583 goto rollback; 7584 7585 __netdev_update_upper_level(dev, NULL); 7586 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7587 7588 __netdev_update_lower_level(upper_dev, NULL); 7589 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7590 NULL); 7591 7592 return 0; 7593 7594 rollback: 7595 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7596 7597 return ret; 7598 } 7599 7600 /** 7601 * netdev_upper_dev_link - Add a link to the upper device 7602 * @dev: device 7603 * @upper_dev: new upper device 7604 * @extack: netlink extended ack 7605 * 7606 * Adds a link to device which is upper to this one. The caller must hold 7607 * the RTNL lock. On a failure a negative errno code is returned. 7608 * On success the reference counts are adjusted and the function 7609 * returns zero. 7610 */ 7611 int netdev_upper_dev_link(struct net_device *dev, 7612 struct net_device *upper_dev, 7613 struct netlink_ext_ack *extack) 7614 { 7615 return __netdev_upper_dev_link(dev, upper_dev, false, 7616 NULL, NULL, extack); 7617 } 7618 EXPORT_SYMBOL(netdev_upper_dev_link); 7619 7620 /** 7621 * netdev_master_upper_dev_link - Add a master link to the upper device 7622 * @dev: device 7623 * @upper_dev: new upper device 7624 * @upper_priv: upper device private 7625 * @upper_info: upper info to be passed down via notifier 7626 * @extack: netlink extended ack 7627 * 7628 * Adds a link to device which is upper to this one. In this case, only 7629 * one master upper device can be linked, although other non-master devices 7630 * might be linked as well. The caller must hold the RTNL lock. 7631 * On a failure a negative errno code is returned. On success the reference 7632 * counts are adjusted and the function returns zero. 7633 */ 7634 int netdev_master_upper_dev_link(struct net_device *dev, 7635 struct net_device *upper_dev, 7636 void *upper_priv, void *upper_info, 7637 struct netlink_ext_ack *extack) 7638 { 7639 return __netdev_upper_dev_link(dev, upper_dev, true, 7640 upper_priv, upper_info, extack); 7641 } 7642 EXPORT_SYMBOL(netdev_master_upper_dev_link); 7643 7644 /** 7645 * netdev_upper_dev_unlink - Removes a link to upper device 7646 * @dev: device 7647 * @upper_dev: new upper device 7648 * 7649 * Removes a link to device which is upper to this one. The caller must hold 7650 * the RTNL lock. 
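 *
 * Example (illustrative): tearing down a link made earlier with
 * netdev_upper_dev_link():
 *
 *	netdev_upper_dev_unlink(port_dev, bridge_dev);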
7651 */ 7652 void netdev_upper_dev_unlink(struct net_device *dev, 7653 struct net_device *upper_dev) 7654 { 7655 struct netdev_notifier_changeupper_info changeupper_info = { 7656 .info = { 7657 .dev = dev, 7658 }, 7659 .upper_dev = upper_dev, 7660 .linking = false, 7661 }; 7662 7663 ASSERT_RTNL(); 7664 7665 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; 7666 7667 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, 7668 &changeupper_info.info); 7669 7670 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 7671 7672 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, 7673 &changeupper_info.info); 7674 7675 __netdev_update_upper_level(dev, NULL); 7676 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL); 7677 7678 __netdev_update_lower_level(upper_dev, NULL); 7679 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, 7680 NULL); 7681 } 7682 EXPORT_SYMBOL(netdev_upper_dev_unlink); 7683 7684 static void __netdev_adjacent_dev_set(struct net_device *upper_dev, 7685 struct net_device *lower_dev, 7686 bool val) 7687 { 7688 struct netdev_adjacent *adj; 7689 7690 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower); 7691 if (adj) 7692 adj->ignore = val; 7693 7694 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper); 7695 if (adj) 7696 adj->ignore = val; 7697 } 7698 7699 static void netdev_adjacent_dev_disable(struct net_device *upper_dev, 7700 struct net_device *lower_dev) 7701 { 7702 __netdev_adjacent_dev_set(upper_dev, lower_dev, true); 7703 } 7704 7705 static void netdev_adjacent_dev_enable(struct net_device *upper_dev, 7706 struct net_device *lower_dev) 7707 { 7708 __netdev_adjacent_dev_set(upper_dev, lower_dev, false); 7709 } 7710 7711 int netdev_adjacent_change_prepare(struct net_device *old_dev, 7712 struct net_device *new_dev, 7713 struct net_device *dev, 7714 struct netlink_ext_ack *extack) 7715 { 7716 int err; 7717 7718 if (!new_dev) 7719 return 0; 7720 7721 if (old_dev && new_dev != old_dev) 7722 netdev_adjacent_dev_disable(dev, old_dev); 7723 7724 err = netdev_upper_dev_link(new_dev, dev, extack); 7725 if (err) { 7726 if (old_dev && new_dev != old_dev) 7727 netdev_adjacent_dev_enable(dev, old_dev); 7728 return err; 7729 } 7730 7731 return 0; 7732 } 7733 EXPORT_SYMBOL(netdev_adjacent_change_prepare); 7734 7735 void netdev_adjacent_change_commit(struct net_device *old_dev, 7736 struct net_device *new_dev, 7737 struct net_device *dev) 7738 { 7739 if (!new_dev || !old_dev) 7740 return; 7741 7742 if (new_dev == old_dev) 7743 return; 7744 7745 netdev_adjacent_dev_enable(dev, old_dev); 7746 netdev_upper_dev_unlink(old_dev, dev); 7747 } 7748 EXPORT_SYMBOL(netdev_adjacent_change_commit); 7749 7750 void netdev_adjacent_change_abort(struct net_device *old_dev, 7751 struct net_device *new_dev, 7752 struct net_device *dev) 7753 { 7754 if (!new_dev) 7755 return; 7756 7757 if (old_dev && new_dev != old_dev) 7758 netdev_adjacent_dev_enable(dev, old_dev); 7759 7760 netdev_upper_dev_unlink(new_dev, dev); 7761 } 7762 EXPORT_SYMBOL(netdev_adjacent_change_abort); 7763 7764 /** 7765 * netdev_bonding_info_change - Dispatch event about slave change 7766 * @dev: device 7767 * @bonding_info: info to dispatch 7768 * 7769 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 7770 * The caller must hold the RTNL lock. 
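 *
 * Sketch of a bonding-style caller (illustrative only; the slave and
 * master fields would be filled from driver state):
 *
 *	struct netdev_bonding_info binfo = { };
 *
 *	(fill binfo.master and binfo.slave here)
 *	netdev_bonding_info_change(slave_dev, &binfo);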
7771 */ 7772 void netdev_bonding_info_change(struct net_device *dev, 7773 struct netdev_bonding_info *bonding_info) 7774 { 7775 struct netdev_notifier_bonding_info info = { 7776 .info.dev = dev, 7777 }; 7778 7779 memcpy(&info.bonding_info, bonding_info, 7780 sizeof(struct netdev_bonding_info)); 7781 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, 7782 &info.info); 7783 } 7784 EXPORT_SYMBOL(netdev_bonding_info_change); 7785 7786 static void netdev_adjacent_add_links(struct net_device *dev) 7787 { 7788 struct netdev_adjacent *iter; 7789 7790 struct net *net = dev_net(dev); 7791 7792 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7793 if (!net_eq(net, dev_net(iter->dev))) 7794 continue; 7795 netdev_adjacent_sysfs_add(iter->dev, dev, 7796 &iter->dev->adj_list.lower); 7797 netdev_adjacent_sysfs_add(dev, iter->dev, 7798 &dev->adj_list.upper); 7799 } 7800 7801 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7802 if (!net_eq(net, dev_net(iter->dev))) 7803 continue; 7804 netdev_adjacent_sysfs_add(iter->dev, dev, 7805 &iter->dev->adj_list.upper); 7806 netdev_adjacent_sysfs_add(dev, iter->dev, 7807 &dev->adj_list.lower); 7808 } 7809 } 7810 7811 static void netdev_adjacent_del_links(struct net_device *dev) 7812 { 7813 struct netdev_adjacent *iter; 7814 7815 struct net *net = dev_net(dev); 7816 7817 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7818 if (!net_eq(net, dev_net(iter->dev))) 7819 continue; 7820 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7821 &iter->dev->adj_list.lower); 7822 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7823 &dev->adj_list.upper); 7824 } 7825 7826 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7827 if (!net_eq(net, dev_net(iter->dev))) 7828 continue; 7829 netdev_adjacent_sysfs_del(iter->dev, dev->name, 7830 &iter->dev->adj_list.upper); 7831 netdev_adjacent_sysfs_del(dev, iter->dev->name, 7832 &dev->adj_list.lower); 7833 } 7834 } 7835 7836 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 7837 { 7838 struct netdev_adjacent *iter; 7839 7840 struct net *net = dev_net(dev); 7841 7842 list_for_each_entry(iter, &dev->adj_list.upper, list) { 7843 if (!net_eq(net, dev_net(iter->dev))) 7844 continue; 7845 netdev_adjacent_sysfs_del(iter->dev, oldname, 7846 &iter->dev->adj_list.lower); 7847 netdev_adjacent_sysfs_add(iter->dev, dev, 7848 &iter->dev->adj_list.lower); 7849 } 7850 7851 list_for_each_entry(iter, &dev->adj_list.lower, list) { 7852 if (!net_eq(net, dev_net(iter->dev))) 7853 continue; 7854 netdev_adjacent_sysfs_del(iter->dev, oldname, 7855 &iter->dev->adj_list.upper); 7856 netdev_adjacent_sysfs_add(iter->dev, dev, 7857 &iter->dev->adj_list.upper); 7858 } 7859 } 7860 7861 void *netdev_lower_dev_get_private(struct net_device *dev, 7862 struct net_device *lower_dev) 7863 { 7864 struct netdev_adjacent *lower; 7865 7866 if (!lower_dev) 7867 return NULL; 7868 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower); 7869 if (!lower) 7870 return NULL; 7871 7872 return lower->private; 7873 } 7874 EXPORT_SYMBOL(netdev_lower_dev_get_private); 7875 7876 7877 /** 7878 * netdev_lower_change - Dispatch event about lower device state change 7879 * @lower_dev: device 7880 * @lower_state_info: state to dispatch 7881 * 7882 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info. 7883 * The caller must hold the RTNL lock. 
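 *
 * Example (illustrative, as a LAG-style driver might report link state):
 *
 *	struct netdev_lag_lower_state_info info = {
 *		.link_up    = true,
 *		.tx_enabled = true,
 *	};
 *
 *	netdev_lower_state_changed(slave_dev, &info);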
7884 */ 7885 void netdev_lower_state_changed(struct net_device *lower_dev, 7886 void *lower_state_info) 7887 { 7888 struct netdev_notifier_changelowerstate_info changelowerstate_info = { 7889 .info.dev = lower_dev, 7890 }; 7891 7892 ASSERT_RTNL(); 7893 changelowerstate_info.lower_state_info = lower_state_info; 7894 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, 7895 &changelowerstate_info.info); 7896 } 7897 EXPORT_SYMBOL(netdev_lower_state_changed); 7898 7899 static void dev_change_rx_flags(struct net_device *dev, int flags) 7900 { 7901 const struct net_device_ops *ops = dev->netdev_ops; 7902 7903 if (ops->ndo_change_rx_flags) 7904 ops->ndo_change_rx_flags(dev, flags); 7905 } 7906 7907 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 7908 { 7909 unsigned int old_flags = dev->flags; 7910 kuid_t uid; 7911 kgid_t gid; 7912 7913 ASSERT_RTNL(); 7914 7915 dev->flags |= IFF_PROMISC; 7916 dev->promiscuity += inc; 7917 if (dev->promiscuity == 0) { 7918 /* 7919 * Avoid overflow. 7920 * If inc causes overflow, untouch promisc and return error. 7921 */ 7922 if (inc < 0) 7923 dev->flags &= ~IFF_PROMISC; 7924 else { 7925 dev->promiscuity -= inc; 7926 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 7927 dev->name); 7928 return -EOVERFLOW; 7929 } 7930 } 7931 if (dev->flags != old_flags) { 7932 pr_info("device %s %s promiscuous mode\n", 7933 dev->name, 7934 dev->flags & IFF_PROMISC ? "entered" : "left"); 7935 if (audit_enabled) { 7936 current_uid_gid(&uid, &gid); 7937 audit_log(audit_context(), GFP_ATOMIC, 7938 AUDIT_ANOM_PROMISCUOUS, 7939 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 7940 dev->name, (dev->flags & IFF_PROMISC), 7941 (old_flags & IFF_PROMISC), 7942 from_kuid(&init_user_ns, audit_get_loginuid(current)), 7943 from_kuid(&init_user_ns, uid), 7944 from_kgid(&init_user_ns, gid), 7945 audit_get_sessionid(current)); 7946 } 7947 7948 dev_change_rx_flags(dev, IFF_PROMISC); 7949 } 7950 if (notify) 7951 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 7952 return 0; 7953 } 7954 7955 /** 7956 * dev_set_promiscuity - update promiscuity count on a device 7957 * @dev: device 7958 * @inc: modifier 7959 * 7960 * Add or remove promiscuity from a device. While the count in the device 7961 * remains above zero the interface remains promiscuous. Once it hits zero 7962 * the device reverts back to normal filtering operation. A negative inc 7963 * value is used to drop promiscuity on the device. 7964 * Return 0 if successful or a negative errno code on error. 7965 */ 7966 int dev_set_promiscuity(struct net_device *dev, int inc) 7967 { 7968 unsigned int old_flags = dev->flags; 7969 int err; 7970 7971 err = __dev_set_promiscuity(dev, inc, true); 7972 if (err < 0) 7973 return err; 7974 if (dev->flags != old_flags) 7975 dev_set_rx_mode(dev); 7976 return err; 7977 } 7978 EXPORT_SYMBOL(dev_set_promiscuity); 7979 7980 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 7981 { 7982 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 7983 7984 ASSERT_RTNL(); 7985 7986 dev->flags |= IFF_ALLMULTI; 7987 dev->allmulti += inc; 7988 if (dev->allmulti == 0) { 7989 /* 7990 * Avoid overflow. 7991 * If inc causes overflow, untouch allmulti and return error. 7992 */ 7993 if (inc < 0) 7994 dev->flags &= ~IFF_ALLMULTI; 7995 else { 7996 dev->allmulti -= inc; 7997 pr_warn("%s: allmulti touches roof, set allmulti failed. 
allmulti feature of device might be broken.\n", 7998 dev->name); 7999 return -EOVERFLOW; 8000 } 8001 } 8002 if (dev->flags ^ old_flags) { 8003 dev_change_rx_flags(dev, IFF_ALLMULTI); 8004 dev_set_rx_mode(dev); 8005 if (notify) 8006 __dev_notify_flags(dev, old_flags, 8007 dev->gflags ^ old_gflags); 8008 } 8009 return 0; 8010 } 8011 8012 /** 8013 * dev_set_allmulti - update allmulti count on a device 8014 * @dev: device 8015 * @inc: modifier 8016 * 8017 * Add or remove reception of all multicast frames to a device. While the 8018 * count in the device remains above zero the interface remains listening 8019 * to all interfaces. Once it hits zero the device reverts back to normal 8020 * filtering operation. A negative @inc value is used to drop the counter 8021 * when releasing a resource needing all multicasts. 8022 * Return 0 if successful or a negative errno code on error. 8023 */ 8024 8025 int dev_set_allmulti(struct net_device *dev, int inc) 8026 { 8027 return __dev_set_allmulti(dev, inc, true); 8028 } 8029 EXPORT_SYMBOL(dev_set_allmulti); 8030 8031 /* 8032 * Upload unicast and multicast address lists to device and 8033 * configure RX filtering. When the device doesn't support unicast 8034 * filtering it is put in promiscuous mode while unicast addresses 8035 * are present. 8036 */ 8037 void __dev_set_rx_mode(struct net_device *dev) 8038 { 8039 const struct net_device_ops *ops = dev->netdev_ops; 8040 8041 /* dev_open will call this function so the list will stay sane. */ 8042 if (!(dev->flags&IFF_UP)) 8043 return; 8044 8045 if (!netif_device_present(dev)) 8046 return; 8047 8048 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 8049 /* Unicast addresses changes may only happen under the rtnl, 8050 * therefore calling __dev_set_promiscuity here is safe. 8051 */ 8052 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 8053 __dev_set_promiscuity(dev, 1, false); 8054 dev->uc_promisc = true; 8055 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 8056 __dev_set_promiscuity(dev, -1, false); 8057 dev->uc_promisc = false; 8058 } 8059 } 8060 8061 if (ops->ndo_set_rx_mode) 8062 ops->ndo_set_rx_mode(dev); 8063 } 8064 8065 void dev_set_rx_mode(struct net_device *dev) 8066 { 8067 netif_addr_lock_bh(dev); 8068 __dev_set_rx_mode(dev); 8069 netif_addr_unlock_bh(dev); 8070 } 8071 8072 /** 8073 * dev_get_flags - get flags reported to userspace 8074 * @dev: device 8075 * 8076 * Get the combination of flag bits exported through APIs to userspace. 8077 */ 8078 unsigned int dev_get_flags(const struct net_device *dev) 8079 { 8080 unsigned int flags; 8081 8082 flags = (dev->flags & ~(IFF_PROMISC | 8083 IFF_ALLMULTI | 8084 IFF_RUNNING | 8085 IFF_LOWER_UP | 8086 IFF_DORMANT)) | 8087 (dev->gflags & (IFF_PROMISC | 8088 IFF_ALLMULTI)); 8089 8090 if (netif_running(dev)) { 8091 if (netif_oper_up(dev)) 8092 flags |= IFF_RUNNING; 8093 if (netif_carrier_ok(dev)) 8094 flags |= IFF_LOWER_UP; 8095 if (netif_dormant(dev)) 8096 flags |= IFF_DORMANT; 8097 } 8098 8099 return flags; 8100 } 8101 EXPORT_SYMBOL(dev_get_flags); 8102 8103 int __dev_change_flags(struct net_device *dev, unsigned int flags, 8104 struct netlink_ext_ack *extack) 8105 { 8106 unsigned int old_flags = dev->flags; 8107 int ret; 8108 8109 ASSERT_RTNL(); 8110 8111 /* 8112 * Set the flags on our device. 
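 *
 * Only the directly user-settable bits are taken from 'flags' here;
 * IFF_UP, IFF_PROMISC and IFF_ALLMULTI have side effects and are
 * reconciled separately below.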
8113 */ 8114 8115 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 8116 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 8117 IFF_AUTOMEDIA)) | 8118 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 8119 IFF_ALLMULTI)); 8120 8121 /* 8122 * Load in the correct multicast list now that the flags have changed. 8123 */ 8124 8125 if ((old_flags ^ flags) & IFF_MULTICAST) 8126 dev_change_rx_flags(dev, IFF_MULTICAST); 8127 8128 dev_set_rx_mode(dev); 8129 8130 /* 8131 * Have we downed the interface? We handle IFF_UP ourselves 8132 * according to user attempts to set it, rather than blindly 8133 * setting it. 8134 */ 8135 8136 ret = 0; 8137 if ((old_flags ^ flags) & IFF_UP) { 8138 if (old_flags & IFF_UP) 8139 __dev_close(dev); 8140 else 8141 ret = __dev_open(dev, extack); 8142 } 8143 8144 if ((flags ^ dev->gflags) & IFF_PROMISC) { 8145 int inc = (flags & IFF_PROMISC) ? 1 : -1; 8146 unsigned int old_flags = dev->flags; 8147 8148 dev->gflags ^= IFF_PROMISC; 8149 8150 if (__dev_set_promiscuity(dev, inc, false) >= 0) 8151 if (dev->flags != old_flags) 8152 dev_set_rx_mode(dev); 8153 } 8154 8155 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 8156 * is important. Some (broken) drivers set IFF_PROMISC when 8157 * IFF_ALLMULTI is requested, without asking us and without reporting it. 8158 */ 8159 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 8160 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 8161 8162 dev->gflags ^= IFF_ALLMULTI; 8163 __dev_set_allmulti(dev, inc, false); 8164 } 8165 8166 return ret; 8167 } 8168 8169 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 8170 unsigned int gchanges) 8171 { 8172 unsigned int changes = dev->flags ^ old_flags; 8173 8174 if (gchanges) 8175 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 8176 8177 if (changes & IFF_UP) { 8178 if (dev->flags & IFF_UP) 8179 call_netdevice_notifiers(NETDEV_UP, dev); 8180 else 8181 call_netdevice_notifiers(NETDEV_DOWN, dev); 8182 } 8183 8184 if (dev->flags & IFF_UP && 8185 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 8186 struct netdev_notifier_change_info change_info = { 8187 .info = { 8188 .dev = dev, 8189 }, 8190 .flags_changed = changes, 8191 }; 8192 8193 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); 8194 } 8195 } 8196 8197 /** 8198 * dev_change_flags - change device settings 8199 * @dev: device 8200 * @flags: device state flags 8201 * @extack: netlink extended ack 8202 * 8203 * Change settings on a device based on state flags. The flags are 8204 * in the userspace-exported format. 
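 *
 * A minimal usage sketch (editor's illustration, not part of the
 * original source); the caller must hold the rtnl semaphore:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, NULL);
 *	rtnl_unlock();
 *
 * dev_get_flags() is read first so that unrelated bits are preserved.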
8205 */ 8206 int dev_change_flags(struct net_device *dev, unsigned int flags, 8207 struct netlink_ext_ack *extack) 8208 { 8209 int ret; 8210 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 8211 8212 ret = __dev_change_flags(dev, flags, extack); 8213 if (ret < 0) 8214 return ret; 8215 8216 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 8217 __dev_notify_flags(dev, old_flags, changes); 8218 return ret; 8219 } 8220 EXPORT_SYMBOL(dev_change_flags); 8221 8222 int __dev_set_mtu(struct net_device *dev, int new_mtu) 8223 { 8224 const struct net_device_ops *ops = dev->netdev_ops; 8225 8226 if (ops->ndo_change_mtu) 8227 return ops->ndo_change_mtu(dev, new_mtu); 8228 8229 /* Pairs with all the lockless reads of dev->mtu in the stack */ 8230 WRITE_ONCE(dev->mtu, new_mtu); 8231 return 0; 8232 } 8233 EXPORT_SYMBOL(__dev_set_mtu); 8234 8235 int dev_validate_mtu(struct net_device *dev, int new_mtu, 8236 struct netlink_ext_ack *extack) 8237 { 8238 /* MTU must be positive, and in range */ 8239 if (new_mtu < 0 || new_mtu < dev->min_mtu) { 8240 NL_SET_ERR_MSG(extack, "mtu less than device minimum"); 8241 return -EINVAL; 8242 } 8243 8244 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) { 8245 NL_SET_ERR_MSG(extack, "mtu greater than device maximum"); 8246 return -EINVAL; 8247 } 8248 return 0; 8249 } 8250 8251 /** 8252 * dev_set_mtu_ext - Change maximum transfer unit 8253 * @dev: device 8254 * @new_mtu: new transfer unit 8255 * @extack: netlink extended ack 8256 * 8257 * Change the maximum transfer size of the network device. 8258 */ 8259 int dev_set_mtu_ext(struct net_device *dev, int new_mtu, 8260 struct netlink_ext_ack *extack) 8261 { 8262 int err, orig_mtu; 8263 8264 if (new_mtu == dev->mtu) 8265 return 0; 8266 8267 err = dev_validate_mtu(dev, new_mtu, extack); 8268 if (err) 8269 return err; 8270 8271 if (!netif_device_present(dev)) 8272 return -ENODEV; 8273 8274 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 8275 err = notifier_to_errno(err); 8276 if (err) 8277 return err; 8278 8279 orig_mtu = dev->mtu; 8280 err = __dev_set_mtu(dev, new_mtu); 8281 8282 if (!err) { 8283 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8284 orig_mtu); 8285 err = notifier_to_errno(err); 8286 if (err) { 8287 /* setting mtu back and notifying everyone again, 8288 * so that they have a chance to revert changes. 
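 * The error from the notifier round that failed is what gets returned
 * to the caller; the return value of this revert notification is
 * deliberately ignored (editor's note).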
8289 */ 8290 __dev_set_mtu(dev, orig_mtu); 8291 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, 8292 new_mtu); 8293 } 8294 } 8295 return err; 8296 } 8297 8298 int dev_set_mtu(struct net_device *dev, int new_mtu) 8299 { 8300 struct netlink_ext_ack extack; 8301 int err; 8302 8303 memset(&extack, 0, sizeof(extack)); 8304 err = dev_set_mtu_ext(dev, new_mtu, &extack); 8305 if (err && extack._msg) 8306 net_err_ratelimited("%s: %s\n", dev->name, extack._msg); 8307 return err; 8308 } 8309 EXPORT_SYMBOL(dev_set_mtu); 8310 8311 /** 8312 * dev_change_tx_queue_len - Change TX queue length of a netdevice 8313 * @dev: device 8314 * @new_len: new tx queue length 8315 */ 8316 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) 8317 { 8318 unsigned int orig_len = dev->tx_queue_len; 8319 int res; 8320 8321 if (new_len != (unsigned int)new_len) 8322 return -ERANGE; 8323 8324 if (new_len != orig_len) { 8325 dev->tx_queue_len = new_len; 8326 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 8327 res = notifier_to_errno(res); 8328 if (res) 8329 goto err_rollback; 8330 res = dev_qdisc_change_tx_queue_len(dev); 8331 if (res) 8332 goto err_rollback; 8333 } 8334 8335 return 0; 8336 8337 err_rollback: 8338 netdev_err(dev, "refused to change device tx_queue_len\n"); 8339 dev->tx_queue_len = orig_len; 8340 return res; 8341 } 8342 8343 /** 8344 * dev_set_group - Change group this device belongs to 8345 * @dev: device 8346 * @new_group: group this device should belong to 8347 */ 8348 void dev_set_group(struct net_device *dev, int new_group) 8349 { 8350 dev->group = new_group; 8351 } 8352 EXPORT_SYMBOL(dev_set_group); 8353 8354 /** 8355 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR. 8356 * @dev: device 8357 * @addr: new address 8358 * @extack: netlink extended ack 8359 */ 8360 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, 8361 struct netlink_ext_ack *extack) 8362 { 8363 struct netdev_notifier_pre_changeaddr_info info = { 8364 .info.dev = dev, 8365 .info.extack = extack, 8366 .dev_addr = addr, 8367 }; 8368 int rc; 8369 8370 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info); 8371 return notifier_to_errno(rc); 8372 } 8373 EXPORT_SYMBOL(dev_pre_changeaddr_notify); 8374 8375 /** 8376 * dev_set_mac_address - Change Media Access Control Address 8377 * @dev: device 8378 * @sa: new address 8379 * @extack: netlink extended ack 8380 * 8381 * Change the hardware (MAC) address of the device 8382 */ 8383 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, 8384 struct netlink_ext_ack *extack) 8385 { 8386 const struct net_device_ops *ops = dev->netdev_ops; 8387 int err; 8388 8389 if (!ops->ndo_set_mac_address) 8390 return -EOPNOTSUPP; 8391 if (sa->sa_family != dev->type) 8392 return -EINVAL; 8393 if (!netif_device_present(dev)) 8394 return -ENODEV; 8395 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); 8396 if (err) 8397 return err; 8398 err = ops->ndo_set_mac_address(dev, sa); 8399 if (err) 8400 return err; 8401 dev->addr_assign_type = NET_ADDR_SET; 8402 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 8403 add_device_randomness(dev->dev_addr, dev->addr_len); 8404 return 0; 8405 } 8406 EXPORT_SYMBOL(dev_set_mac_address); 8407 8408 /** 8409 * dev_change_carrier - Change device carrier 8410 * @dev: device 8411 * @new_carrier: new value 8412 * 8413 * Change device carrier 8414 */ 8415 int dev_change_carrier(struct net_device *dev, bool new_carrier) 8416 { 8417 const struct net_device_ops *ops = dev->netdev_ops; 
8418 8419 if (!ops->ndo_change_carrier) 8420 return -EOPNOTSUPP; 8421 if (!netif_device_present(dev)) 8422 return -ENODEV; 8423 return ops->ndo_change_carrier(dev, new_carrier); 8424 } 8425 EXPORT_SYMBOL(dev_change_carrier); 8426 8427 /** 8428 * dev_get_phys_port_id - Get device physical port ID 8429 * @dev: device 8430 * @ppid: port ID 8431 * 8432 * Get device physical port ID 8433 */ 8434 int dev_get_phys_port_id(struct net_device *dev, 8435 struct netdev_phys_item_id *ppid) 8436 { 8437 const struct net_device_ops *ops = dev->netdev_ops; 8438 8439 if (!ops->ndo_get_phys_port_id) 8440 return -EOPNOTSUPP; 8441 return ops->ndo_get_phys_port_id(dev, ppid); 8442 } 8443 EXPORT_SYMBOL(dev_get_phys_port_id); 8444 8445 /** 8446 * dev_get_phys_port_name - Get device physical port name 8447 * @dev: device 8448 * @name: port name 8449 * @len: limit of bytes to copy to name 8450 * 8451 * Get device physical port name 8452 */ 8453 int dev_get_phys_port_name(struct net_device *dev, 8454 char *name, size_t len) 8455 { 8456 const struct net_device_ops *ops = dev->netdev_ops; 8457 int err; 8458 8459 if (ops->ndo_get_phys_port_name) { 8460 err = ops->ndo_get_phys_port_name(dev, name, len); 8461 if (err != -EOPNOTSUPP) 8462 return err; 8463 } 8464 return devlink_compat_phys_port_name_get(dev, name, len); 8465 } 8466 EXPORT_SYMBOL(dev_get_phys_port_name); 8467 8468 /** 8469 * dev_get_port_parent_id - Get the device's port parent identifier 8470 * @dev: network device 8471 * @ppid: pointer to a storage for the port's parent identifier 8472 * @recurse: allow/disallow recursion to lower devices 8473 * 8474 * Get the device's port parent identifier 8475 */ 8476 int dev_get_port_parent_id(struct net_device *dev, 8477 struct netdev_phys_item_id *ppid, 8478 bool recurse) 8479 { 8480 const struct net_device_ops *ops = dev->netdev_ops; 8481 struct netdev_phys_item_id first = { }; 8482 struct net_device *lower_dev; 8483 struct list_head *iter; 8484 int err; 8485 8486 if (ops->ndo_get_port_parent_id) { 8487 err = ops->ndo_get_port_parent_id(dev, ppid); 8488 if (err != -EOPNOTSUPP) 8489 return err; 8490 } 8491 8492 err = devlink_compat_switch_id_get(dev, ppid); 8493 if (!err || err != -EOPNOTSUPP) 8494 return err; 8495 8496 if (!recurse) 8497 return -EOPNOTSUPP; 8498 8499 netdev_for_each_lower_dev(dev, lower_dev, iter) { 8500 err = dev_get_port_parent_id(lower_dev, ppid, recurse); 8501 if (err) 8502 break; 8503 if (!first.id_len) 8504 first = *ppid; 8505 else if (memcmp(&first, ppid, sizeof(*ppid))) 8506 return -ENODATA; 8507 } 8508 8509 return err; 8510 } 8511 EXPORT_SYMBOL(dev_get_port_parent_id); 8512 8513 /** 8514 * netdev_port_same_parent_id - Indicate if two network devices have 8515 * the same port parent identifier 8516 * @a: first network device 8517 * @b: second network device 8518 */ 8519 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) 8520 { 8521 struct netdev_phys_item_id a_id = { }; 8522 struct netdev_phys_item_id b_id = { }; 8523 8524 if (dev_get_port_parent_id(a, &a_id, true) || 8525 dev_get_port_parent_id(b, &b_id, true)) 8526 return false; 8527 8528 return netdev_phys_item_id_same(&a_id, &b_id); 8529 } 8530 EXPORT_SYMBOL(netdev_port_same_parent_id); 8531 8532 /** 8533 * dev_change_proto_down - update protocol port state information 8534 * @dev: device 8535 * @proto_down: new value 8536 * 8537 * This info can be used by switch drivers to set the phys state of the 8538 * port. 
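 *
 * Drivers with no special requirements can simply point their ndo at
 * the generic helper defined further below (editor's note):
 *
 *	.ndo_change_proto_down = dev_change_proto_down_generic,
 *
 * which mirrors the protodown state onto the carrier.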
8539 */ 8540 int dev_change_proto_down(struct net_device *dev, bool proto_down) 8541 { 8542 const struct net_device_ops *ops = dev->netdev_ops; 8543 8544 if (!ops->ndo_change_proto_down) 8545 return -EOPNOTSUPP; 8546 if (!netif_device_present(dev)) 8547 return -ENODEV; 8548 return ops->ndo_change_proto_down(dev, proto_down); 8549 } 8550 EXPORT_SYMBOL(dev_change_proto_down); 8551 8552 /** 8553 * dev_change_proto_down_generic - generic implementation for 8554 * ndo_change_proto_down that sets carrier according to 8555 * proto_down. 8556 * 8557 * @dev: device 8558 * @proto_down: new value 8559 */ 8560 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) 8561 { 8562 if (proto_down) 8563 netif_carrier_off(dev); 8564 else 8565 netif_carrier_on(dev); 8566 dev->proto_down = proto_down; 8567 return 0; 8568 } 8569 EXPORT_SYMBOL(dev_change_proto_down_generic); 8570 8571 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, 8572 enum bpf_netdev_command cmd) 8573 { 8574 struct netdev_bpf xdp; 8575 8576 if (!bpf_op) 8577 return 0; 8578 8579 memset(&xdp, 0, sizeof(xdp)); 8580 xdp.command = cmd; 8581 8582 /* Query must always succeed. */ 8583 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); 8584 8585 return xdp.prog_id; 8586 } 8587 8588 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, 8589 struct netlink_ext_ack *extack, u32 flags, 8590 struct bpf_prog *prog) 8591 { 8592 bool non_hw = !(flags & XDP_FLAGS_HW_MODE); 8593 struct bpf_prog *prev_prog = NULL; 8594 struct netdev_bpf xdp; 8595 int err; 8596 8597 if (non_hw) { 8598 prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op, 8599 XDP_QUERY_PROG)); 8600 if (IS_ERR(prev_prog)) 8601 prev_prog = NULL; 8602 } 8603 8604 memset(&xdp, 0, sizeof(xdp)); 8605 if (flags & XDP_FLAGS_HW_MODE) 8606 xdp.command = XDP_SETUP_PROG_HW; 8607 else 8608 xdp.command = XDP_SETUP_PROG; 8609 xdp.extack = extack; 8610 xdp.flags = flags; 8611 xdp.prog = prog; 8612 8613 err = bpf_op(dev, &xdp); 8614 if (!err && non_hw) 8615 bpf_prog_change_xdp(prev_prog, prog); 8616 8617 if (prev_prog) 8618 bpf_prog_put(prev_prog); 8619 8620 return err; 8621 } 8622 8623 static void dev_xdp_uninstall(struct net_device *dev) 8624 { 8625 struct netdev_bpf xdp; 8626 bpf_op_t ndo_bpf; 8627 8628 /* Remove generic XDP */ 8629 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL)); 8630 8631 /* Remove from the driver */ 8632 ndo_bpf = dev->netdev_ops->ndo_bpf; 8633 if (!ndo_bpf) 8634 return; 8635 8636 memset(&xdp, 0, sizeof(xdp)); 8637 xdp.command = XDP_QUERY_PROG; 8638 WARN_ON(ndo_bpf(dev, &xdp)); 8639 if (xdp.prog_id) 8640 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 8641 NULL)); 8642 8643 /* Remove HW offload */ 8644 memset(&xdp, 0, sizeof(xdp)); 8645 xdp.command = XDP_QUERY_PROG_HW; 8646 if (!ndo_bpf(dev, &xdp) && xdp.prog_id) 8647 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, 8648 NULL)); 8649 } 8650 8651 /** 8652 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 8653 * @dev: device 8654 * @extack: netlink extended ack 8655 * @fd: new program fd or negative value to clear 8656 * @flags: xdp-related flags 8657 * 8658 * Set or clear a bpf program for a device 8659 */ 8660 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 8661 int fd, u32 flags) 8662 { 8663 const struct net_device_ops *ops = dev->netdev_ops; 8664 enum bpf_netdev_command query; 8665 struct bpf_prog *prog = NULL; 8666 bpf_op_t bpf_op, bpf_chk; 8667 bool offload; 8668 int err; 8669 8670 
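	/* Editor's note: bpf_op below is the install target -- the driver's
	 * ndo_bpf for native or offloaded XDP, generic_xdp_install otherwise
	 * -- while bpf_chk is pointed at the opposite hook and used only to
	 * verify that native and generic programs are not mixed.
	 */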
ASSERT_RTNL(); 8671 8672 offload = flags & XDP_FLAGS_HW_MODE; 8673 query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; 8674 8675 bpf_op = bpf_chk = ops->ndo_bpf; 8676 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { 8677 NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); 8678 return -EOPNOTSUPP; 8679 } 8680 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) 8681 bpf_op = generic_xdp_install; 8682 if (bpf_op == bpf_chk) 8683 bpf_chk = generic_xdp_install; 8684 8685 if (fd >= 0) { 8686 u32 prog_id; 8687 8688 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { 8689 NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); 8690 return -EEXIST; 8691 } 8692 8693 prog_id = __dev_xdp_query(dev, bpf_op, query); 8694 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) { 8695 NL_SET_ERR_MSG(extack, "XDP program already attached"); 8696 return -EBUSY; 8697 } 8698 8699 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 8700 bpf_op == ops->ndo_bpf); 8701 if (IS_ERR(prog)) 8702 return PTR_ERR(prog); 8703 8704 if (!offload && bpf_prog_is_dev_bound(prog->aux)) { 8705 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); 8706 bpf_prog_put(prog); 8707 return -EINVAL; 8708 } 8709 8710 /* prog->aux->id may be 0 for orphaned device-bound progs */ 8711 if (prog->aux->id && prog->aux->id == prog_id) { 8712 bpf_prog_put(prog); 8713 return 0; 8714 } 8715 } else { 8716 if (!__dev_xdp_query(dev, bpf_op, query)) 8717 return 0; 8718 } 8719 8720 err = dev_xdp_install(dev, bpf_op, extack, flags, prog); 8721 if (err < 0 && prog) 8722 bpf_prog_put(prog); 8723 8724 return err; 8725 } 8726 8727 /** 8728 * dev_new_index - allocate an ifindex 8729 * @net: the applicable net namespace 8730 * 8731 * Returns a suitable unique value for a new device interface 8732 * number. The caller must hold the rtnl semaphore or the 8733 * dev_base_lock to be sure it remains unique. 8734 */ 8735 static int dev_new_index(struct net *net) 8736 { 8737 int ifindex = net->ifindex; 8738 8739 for (;;) { 8740 if (++ifindex <= 0) 8741 ifindex = 1; 8742 if (!__dev_get_by_index(net, ifindex)) 8743 return net->ifindex = ifindex; 8744 } 8745 } 8746 8747 /* Delayed registration/unregisteration */ 8748 static LIST_HEAD(net_todo_list); 8749 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 8750 8751 static void net_set_todo(struct net_device *dev) 8752 { 8753 list_add_tail(&dev->todo_list, &net_todo_list); 8754 dev_net(dev)->dev_unreg_count++; 8755 } 8756 8757 static void rollback_registered_many(struct list_head *head) 8758 { 8759 struct net_device *dev, *tmp; 8760 LIST_HEAD(close_head); 8761 8762 BUG_ON(dev_boot_phase); 8763 ASSERT_RTNL(); 8764 8765 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 8766 /* Some devices call without registering 8767 * for initialization unwind. Remove those 8768 * devices and proceed with the remaining. 8769 */ 8770 if (dev->reg_state == NETREG_UNINITIALIZED) { 8771 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 8772 dev->name, dev); 8773 8774 WARN_ON(1); 8775 list_del(&dev->unreg_list); 8776 continue; 8777 } 8778 dev->dismantle = true; 8779 BUG_ON(dev->reg_state != NETREG_REGISTERED); 8780 } 8781 8782 /* If device is running, close it first. 
*/ 8783 list_for_each_entry(dev, head, unreg_list) 8784 list_add_tail(&dev->close_list, &close_head); 8785 dev_close_many(&close_head, true); 8786 8787 list_for_each_entry(dev, head, unreg_list) { 8788 /* And unlink it from device chain. */ 8789 unlist_netdevice(dev); 8790 8791 dev->reg_state = NETREG_UNREGISTERING; 8792 } 8793 flush_all_backlogs(); 8794 8795 synchronize_net(); 8796 8797 list_for_each_entry(dev, head, unreg_list) { 8798 struct sk_buff *skb = NULL; 8799 8800 /* Shutdown queueing discipline. */ 8801 dev_shutdown(dev); 8802 8803 dev_xdp_uninstall(dev); 8804 8805 /* Notify protocols, that we are about to destroy 8806 * this device. They should clean all the things. 8807 */ 8808 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 8809 8810 if (!dev->rtnl_link_ops || 8811 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 8812 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, 8813 GFP_KERNEL, NULL, 0); 8814 8815 /* 8816 * Flush the unicast and multicast chains 8817 */ 8818 dev_uc_flush(dev); 8819 dev_mc_flush(dev); 8820 8821 netdev_name_node_alt_flush(dev); 8822 netdev_name_node_free(dev->name_node); 8823 8824 if (dev->netdev_ops->ndo_uninit) 8825 dev->netdev_ops->ndo_uninit(dev); 8826 8827 if (skb) 8828 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); 8829 8830 /* Notifier chain MUST detach us all upper devices. */ 8831 WARN_ON(netdev_has_any_upper_dev(dev)); 8832 WARN_ON(netdev_has_any_lower_dev(dev)); 8833 8834 /* Remove entries from kobject tree */ 8835 netdev_unregister_kobject(dev); 8836 #ifdef CONFIG_XPS 8837 /* Remove XPS queueing entries */ 8838 netif_reset_xps_queues_gt(dev, 0); 8839 #endif 8840 } 8841 8842 synchronize_net(); 8843 8844 list_for_each_entry(dev, head, unreg_list) 8845 dev_put(dev); 8846 } 8847 8848 static void rollback_registered(struct net_device *dev) 8849 { 8850 LIST_HEAD(single); 8851 8852 list_add(&dev->unreg_list, &single); 8853 rollback_registered_many(&single); 8854 list_del(&single); 8855 } 8856 8857 static netdev_features_t netdev_sync_upper_features(struct net_device *lower, 8858 struct net_device *upper, netdev_features_t features) 8859 { 8860 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 8861 netdev_features_t feature; 8862 int feature_bit; 8863 8864 for_each_netdev_feature(upper_disables, feature_bit) { 8865 feature = __NETIF_F_BIT(feature_bit); 8866 if (!(upper->wanted_features & feature) 8867 && (features & feature)) { 8868 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n", 8869 &feature, upper->name); 8870 features &= ~feature; 8871 } 8872 } 8873 8874 return features; 8875 } 8876 8877 static void netdev_sync_lower_features(struct net_device *upper, 8878 struct net_device *lower, netdev_features_t features) 8879 { 8880 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES; 8881 netdev_features_t feature; 8882 int feature_bit; 8883 8884 for_each_netdev_feature(upper_disables, feature_bit) { 8885 feature = __NETIF_F_BIT(feature_bit); 8886 if (!(features & feature) && (lower->features & feature)) { 8887 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 8888 &feature, lower->name); 8889 lower->wanted_features &= ~feature; 8890 netdev_update_features(lower); 8891 8892 if (unlikely(lower->features & feature)) 8893 netdev_WARN(upper, "failed to disable %pNF on %s!\n", 8894 &feature, lower->name); 8895 } 8896 } 8897 } 8898 8899 static netdev_features_t netdev_fix_features(struct net_device *dev, 8900 netdev_features_t features) 8901 { 8902 /* Fix illegal checksum combinations */ 8903 if ((features & 
NETIF_F_HW_CSUM) && 8904 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 8905 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 8906 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 8907 } 8908 8909 /* TSO requires that SG is present as well. */ 8910 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 8911 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 8912 features &= ~NETIF_F_ALL_TSO; 8913 } 8914 8915 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 8916 !(features & NETIF_F_IP_CSUM)) { 8917 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 8918 features &= ~NETIF_F_TSO; 8919 features &= ~NETIF_F_TSO_ECN; 8920 } 8921 8922 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 8923 !(features & NETIF_F_IPV6_CSUM)) { 8924 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 8925 features &= ~NETIF_F_TSO6; 8926 } 8927 8928 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */ 8929 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO)) 8930 features &= ~NETIF_F_TSO_MANGLEID; 8931 8932 /* TSO ECN requires that TSO is present as well. */ 8933 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 8934 features &= ~NETIF_F_TSO_ECN; 8935 8936 /* Software GSO depends on SG. */ 8937 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 8938 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 8939 features &= ~NETIF_F_GSO; 8940 } 8941 8942 /* GSO partial features require GSO partial be set */ 8943 if ((features & dev->gso_partial_features) && 8944 !(features & NETIF_F_GSO_PARTIAL)) { 8945 netdev_dbg(dev, 8946 "Dropping partially supported GSO features since no GSO partial.\n"); 8947 features &= ~dev->gso_partial_features; 8948 } 8949 8950 if (!(features & NETIF_F_RXCSUM)) { 8951 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet 8952 * successfully merged by hardware must also have the 8953 * checksum verified by hardware. If the user does not 8954 * want to enable RXCSUM, logically, we should disable GRO_HW. 
8955 */ 8956 if (features & NETIF_F_GRO_HW) { 8957 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n"); 8958 features &= ~NETIF_F_GRO_HW; 8959 } 8960 } 8961 8962 /* LRO/HW-GRO features cannot be combined with RX-FCS */ 8963 if (features & NETIF_F_RXFCS) { 8964 if (features & NETIF_F_LRO) { 8965 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n"); 8966 features &= ~NETIF_F_LRO; 8967 } 8968 8969 if (features & NETIF_F_GRO_HW) { 8970 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n"); 8971 features &= ~NETIF_F_GRO_HW; 8972 } 8973 } 8974 8975 return features; 8976 } 8977 8978 int __netdev_update_features(struct net_device *dev) 8979 { 8980 struct net_device *upper, *lower; 8981 netdev_features_t features; 8982 struct list_head *iter; 8983 int err = -1; 8984 8985 ASSERT_RTNL(); 8986 8987 features = netdev_get_wanted_features(dev); 8988 8989 if (dev->netdev_ops->ndo_fix_features) 8990 features = dev->netdev_ops->ndo_fix_features(dev, features); 8991 8992 /* driver might be less strict about feature dependencies */ 8993 features = netdev_fix_features(dev, features); 8994 8995 /* some features can't be enabled if they're off on an upper device */ 8996 netdev_for_each_upper_dev_rcu(dev, upper, iter) 8997 features = netdev_sync_upper_features(dev, upper, features); 8998 8999 if (dev->features == features) 9000 goto sync_lower; 9001 9002 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 9003 &dev->features, &features); 9004 9005 if (dev->netdev_ops->ndo_set_features) 9006 err = dev->netdev_ops->ndo_set_features(dev, features); 9007 else 9008 err = 0; 9009 9010 if (unlikely(err < 0)) { 9011 netdev_err(dev, 9012 "set_features() failed (%d); wanted %pNF, left %pNF\n", 9013 err, &features, &dev->features); 9014 /* return non-0 since some features might have changed and 9015 * it's better to fire a spurious notification than miss it 9016 */ 9017 return -1; 9018 } 9019 9020 sync_lower: 9021 /* some features must be disabled on lower devices when disabled 9022 * on an upper device (think: bonding master or bridge) 9023 */ 9024 netdev_for_each_lower_dev(dev, lower, iter) 9025 netdev_sync_lower_features(dev, lower, features); 9026 9027 if (!err) { 9028 netdev_features_t diff = features ^ dev->features; 9029 9030 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { 9031 /* udp_tunnel_{get,drop}_rx_info both need 9032 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the 9033 * device, or they won't do anything. 9034 * Thus we need to update dev->features 9035 * *before* calling udp_tunnel_get_rx_info, 9036 * but *after* calling udp_tunnel_drop_rx_info. 9037 */ 9038 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { 9039 dev->features = features; 9040 udp_tunnel_get_rx_info(dev); 9041 } else { 9042 udp_tunnel_drop_rx_info(dev); 9043 } 9044 } 9045 9046 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) { 9047 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { 9048 dev->features = features; 9049 err |= vlan_get_rx_ctag_filter_info(dev); 9050 } else { 9051 vlan_drop_rx_ctag_filter_info(dev); 9052 } 9053 } 9054 9055 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) { 9056 if (features & NETIF_F_HW_VLAN_STAG_FILTER) { 9057 dev->features = features; 9058 err |= vlan_get_rx_stag_filter_info(dev); 9059 } else { 9060 vlan_drop_rx_stag_filter_info(dev); 9061 } 9062 } 9063 9064 dev->features = features; 9065 } 9066 9067 return err < 0 ? 
0 : 1; 9068 } 9069 9070 /** 9071 * netdev_update_features - recalculate device features 9072 * @dev: the device to check 9073 * 9074 * Recalculate dev->features set and send notifications if it 9075 * has changed. Should be called after driver or hardware dependent 9076 * conditions might have changed that influence the features. 9077 */ 9078 void netdev_update_features(struct net_device *dev) 9079 { 9080 if (__netdev_update_features(dev)) 9081 netdev_features_change(dev); 9082 } 9083 EXPORT_SYMBOL(netdev_update_features); 9084 9085 /** 9086 * netdev_change_features - recalculate device features 9087 * @dev: the device to check 9088 * 9089 * Recalculate dev->features set and send notifications even 9090 * if they have not changed. Should be called instead of 9091 * netdev_update_features() if dev->vlan_features might also 9092 * have changed, to allow the changes to be propagated to stacked 9093 * VLAN devices. 9094 */ 9095 void netdev_change_features(struct net_device *dev) 9096 { 9097 __netdev_update_features(dev); 9098 netdev_features_change(dev); 9099 } 9100 EXPORT_SYMBOL(netdev_change_features); 9101 9102 /** 9103 * netif_stacked_transfer_operstate - transfer operstate 9104 * @rootdev: the root or lower level device to transfer state from 9105 * @dev: the device to transfer operstate to 9106 * 9107 * Transfer operational state from root to device. This is normally 9108 * called when a stacking relationship exists between the root 9109 * device and the device (a leaf device). 9110 */ 9111 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 9112 struct net_device *dev) 9113 { 9114 if (rootdev->operstate == IF_OPER_DORMANT) 9115 netif_dormant_on(dev); 9116 else 9117 netif_dormant_off(dev); 9118 9119 if (netif_carrier_ok(rootdev)) 9120 netif_carrier_on(dev); 9121 else 9122 netif_carrier_off(dev); 9123 } 9124 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 9125 9126 static int netif_alloc_rx_queues(struct net_device *dev) 9127 { 9128 unsigned int i, count = dev->num_rx_queues; 9129 struct netdev_rx_queue *rx; 9130 size_t sz = count * sizeof(*rx); 9131 int err = 0; 9132 9133 BUG_ON(count < 1); 9134 9135 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 9136 if (!rx) 9137 return -ENOMEM; 9138 9139 dev->_rx = rx; 9140 9141 for (i = 0; i < count; i++) { 9142 rx[i].dev = dev; 9143 9144 /* XDP RX-queue setup */ 9145 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i); 9146 if (err < 0) 9147 goto err_rxq_info; 9148 } 9149 return 0; 9150 9151 err_rxq_info: 9152 /* Rollback successful reg's and free other resources */ 9153 while (i--) 9154 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 9155 kvfree(dev->_rx); 9156 dev->_rx = NULL; 9157 return err; 9158 } 9159 9160 static void netif_free_rx_queues(struct net_device *dev) 9161 { 9162 unsigned int i, count = dev->num_rx_queues; 9163 9164 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 9165 if (!dev->_rx) 9166 return; 9167 9168 for (i = 0; i < count; i++) 9169 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq); 9170 9171 kvfree(dev->_rx); 9172 } 9173 9174 static void netdev_init_one_queue(struct net_device *dev, 9175 struct netdev_queue *queue, void *_unused) 9176 { 9177 /* Initialize queue lock */ 9178 spin_lock_init(&queue->_xmit_lock); 9179 lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key); 9180 queue->xmit_lock_owner = -1; 9181 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 9182 queue->dev = dev; 9183 #ifdef CONFIG_BQL 9184 dql_init(&queue->dql, HZ); 9185 #endif 9186 } 9187 9188 static void 
netif_free_tx_queues(struct net_device *dev) 9189 { 9190 kvfree(dev->_tx); 9191 } 9192 9193 static int netif_alloc_netdev_queues(struct net_device *dev) 9194 { 9195 unsigned int count = dev->num_tx_queues; 9196 struct netdev_queue *tx; 9197 size_t sz = count * sizeof(*tx); 9198 9199 if (count < 1 || count > 0xffff) 9200 return -EINVAL; 9201 9202 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 9203 if (!tx) 9204 return -ENOMEM; 9205 9206 dev->_tx = tx; 9207 9208 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 9209 spin_lock_init(&dev->tx_global_lock); 9210 9211 return 0; 9212 } 9213 9214 void netif_tx_stop_all_queues(struct net_device *dev) 9215 { 9216 unsigned int i; 9217 9218 for (i = 0; i < dev->num_tx_queues; i++) { 9219 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 9220 9221 netif_tx_stop_queue(txq); 9222 } 9223 } 9224 EXPORT_SYMBOL(netif_tx_stop_all_queues); 9225 9226 static void netdev_register_lockdep_key(struct net_device *dev) 9227 { 9228 lockdep_register_key(&dev->qdisc_tx_busylock_key); 9229 lockdep_register_key(&dev->qdisc_running_key); 9230 lockdep_register_key(&dev->qdisc_xmit_lock_key); 9231 lockdep_register_key(&dev->addr_list_lock_key); 9232 } 9233 9234 static void netdev_unregister_lockdep_key(struct net_device *dev) 9235 { 9236 lockdep_unregister_key(&dev->qdisc_tx_busylock_key); 9237 lockdep_unregister_key(&dev->qdisc_running_key); 9238 lockdep_unregister_key(&dev->qdisc_xmit_lock_key); 9239 lockdep_unregister_key(&dev->addr_list_lock_key); 9240 } 9241 9242 void netdev_update_lockdep_key(struct net_device *dev) 9243 { 9244 lockdep_unregister_key(&dev->addr_list_lock_key); 9245 lockdep_register_key(&dev->addr_list_lock_key); 9246 9247 lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key); 9248 } 9249 EXPORT_SYMBOL(netdev_update_lockdep_key); 9250 9251 /** 9252 * register_netdevice - register a network device 9253 * @dev: device to register 9254 * 9255 * Take a completed network device structure and add it to the kernel 9256 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 9257 * chain. 0 is returned on success. A negative errno code is returned 9258 * on a failure to set up the device, or if the name is a duplicate. 9259 * 9260 * Callers must hold the rtnl semaphore. You may want 9261 * register_netdev() instead of this. 9262 * 9263 * BUGS: 9264 * The locking appears insufficient to guarantee two parallel registers 9265 * will not get the same name. 9266 */ 9267 9268 int register_netdevice(struct net_device *dev) 9269 { 9270 int ret; 9271 struct net *net = dev_net(dev); 9272 9273 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE < 9274 NETDEV_FEATURE_COUNT); 9275 BUG_ON(dev_boot_phase); 9276 ASSERT_RTNL(); 9277 9278 might_sleep(); 9279 9280 /* When net_device's are persistent, this will be fatal. 
*/ 9281 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 9282 BUG_ON(!net); 9283 9284 spin_lock_init(&dev->addr_list_lock); 9285 lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key); 9286 9287 ret = dev_get_valid_name(net, dev, dev->name); 9288 if (ret < 0) 9289 goto out; 9290 9291 ret = -ENOMEM; 9292 dev->name_node = netdev_name_node_head_alloc(dev); 9293 if (!dev->name_node) 9294 goto out; 9295 9296 /* Init, if this function is available */ 9297 if (dev->netdev_ops->ndo_init) { 9298 ret = dev->netdev_ops->ndo_init(dev); 9299 if (ret) { 9300 if (ret > 0) 9301 ret = -EIO; 9302 goto err_free_name; 9303 } 9304 } 9305 9306 if (((dev->hw_features | dev->features) & 9307 NETIF_F_HW_VLAN_CTAG_FILTER) && 9308 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 9309 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 9310 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 9311 ret = -EINVAL; 9312 goto err_uninit; 9313 } 9314 9315 ret = -EBUSY; 9316 if (!dev->ifindex) 9317 dev->ifindex = dev_new_index(net); 9318 else if (__dev_get_by_index(net, dev->ifindex)) 9319 goto err_uninit; 9320 9321 /* Transfer changeable features to wanted_features and enable 9322 * software offloads (GSO and GRO). 9323 */ 9324 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF); 9325 dev->features |= NETIF_F_SOFT_FEATURES; 9326 9327 if (dev->netdev_ops->ndo_udp_tunnel_add) { 9328 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; 9329 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; 9330 } 9331 9332 dev->wanted_features = dev->features & dev->hw_features; 9333 9334 if (!(dev->flags & IFF_LOOPBACK)) 9335 dev->hw_features |= NETIF_F_NOCACHE_COPY; 9336 9337 /* If IPv4 TCP segmentation offload is supported we should also 9338 * allow the device to enable segmenting the frame with the option 9339 * of ignoring a static IP ID value. This doesn't enable the 9340 * feature itself but allows the user to enable it later. 9341 */ 9342 if (dev->hw_features & NETIF_F_TSO) 9343 dev->hw_features |= NETIF_F_TSO_MANGLEID; 9344 if (dev->vlan_features & NETIF_F_TSO) 9345 dev->vlan_features |= NETIF_F_TSO_MANGLEID; 9346 if (dev->mpls_features & NETIF_F_TSO) 9347 dev->mpls_features |= NETIF_F_TSO_MANGLEID; 9348 if (dev->hw_enc_features & NETIF_F_TSO) 9349 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 9350 9351 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 9352 */ 9353 dev->vlan_features |= NETIF_F_HIGHDMA; 9354 9355 /* Make NETIF_F_SG inheritable to tunnel devices. 9356 */ 9357 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL; 9358 9359 /* Make NETIF_F_SG inheritable to MPLS. 9360 */ 9361 dev->mpls_features |= NETIF_F_SG; 9362 9363 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 9364 ret = notifier_to_errno(ret); 9365 if (ret) 9366 goto err_uninit; 9367 9368 ret = netdev_register_kobject(dev); 9369 if (ret) { 9370 dev->reg_state = NETREG_UNREGISTERED; 9371 goto err_uninit; 9372 } 9373 dev->reg_state = NETREG_REGISTERED; 9374 9375 __netdev_update_features(dev); 9376 9377 /* 9378 * Default initial state at registry is that the 9379 * device is present. 9380 */ 9381 9382 set_bit(__LINK_STATE_PRESENT, &dev->state); 9383 9384 linkwatch_init_dev(dev); 9385 9386 dev_init_scheduler(dev); 9387 dev_hold(dev); 9388 list_netdevice(dev); 9389 add_device_randomness(dev->dev_addr, dev->addr_len); 9390 9391 /* If the device has permanent device address, driver should 9392 * set dev_addr and also addr_assign_type should be set to 9393 * NET_ADDR_PERM (default value). 
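 * (Editor's sketch: a driver that reads the address from hardware
 * would typically do, before calling register_netdevice():
 *
 *	ether_addr_copy(dev->dev_addr, addr_from_hw);
 *
 * with addr_from_hw a placeholder buffer; leaving addr_assign_type at
 * NET_ADDR_PERM then makes the address land in perm_addr here.)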
9394 */ 9395 if (dev->addr_assign_type == NET_ADDR_PERM) 9396 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 9397 9398 /* Notify protocols, that a new device appeared. */ 9399 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 9400 ret = notifier_to_errno(ret); 9401 if (ret) { 9402 rollback_registered(dev); 9403 rcu_barrier(); 9404 9405 dev->reg_state = NETREG_UNREGISTERED; 9406 } 9407 /* 9408 * Prevent userspace races by waiting until the network 9409 * device is fully set up before sending notifications. 9410 */ 9411 if (!dev->rtnl_link_ops || 9412 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 9413 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 9414 9415 out: 9416 return ret; 9417 9418 err_uninit: 9419 if (dev->netdev_ops->ndo_uninit) 9420 dev->netdev_ops->ndo_uninit(dev); 9421 if (dev->priv_destructor) 9422 dev->priv_destructor(dev); 9423 err_free_name: 9424 netdev_name_node_free(dev->name_node); 9425 goto out; 9426 } 9427 EXPORT_SYMBOL(register_netdevice); 9428 9429 /** 9430 * init_dummy_netdev - init a dummy network device for NAPI 9431 * @dev: device to init 9432 * 9433 * This takes a network device structure and initializes the minimum 9434 * number of fields so it can be used to schedule NAPI polls without 9435 * registering a full blown interface. This is to be used by drivers 9436 * that need to tie several hardware interfaces to a single NAPI 9437 * poll scheduler due to HW limitations. 9438 */ 9439 int init_dummy_netdev(struct net_device *dev) 9440 { 9441 /* Clear everything. Note we don't initialize spinlocks 9442 * as they aren't supposed to be taken by any of the 9443 * NAPI code and this dummy netdev is supposed to be 9444 * only ever used for NAPI polls 9445 */ 9446 memset(dev, 0, sizeof(struct net_device)); 9447 9448 /* make sure we BUG if trying to hit standard 9449 * register/unregister code path 9450 */ 9451 dev->reg_state = NETREG_DUMMY; 9452 9453 /* NAPI wants this */ 9454 INIT_LIST_HEAD(&dev->napi_list); 9455 9456 /* a dummy interface is started by default */ 9457 set_bit(__LINK_STATE_PRESENT, &dev->state); 9458 set_bit(__LINK_STATE_START, &dev->state); 9459 9460 /* napi_busy_loop stats accounting wants this */ 9461 dev_net_set(dev, &init_net); 9462 9463 /* Note: we don't allocate pcpu_refcnt for dummy devices, 9464 * because users of this 'device' don't need to change 9465 * its refcount. 9466 */ 9467 9468 return 0; 9469 } 9470 EXPORT_SYMBOL_GPL(init_dummy_netdev); 9471 9472 9473 /** 9474 * register_netdev - register a network device 9475 * @dev: device to register 9476 * 9477 * Take a completed network device structure and add it to the kernel 9478 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 9479 * chain. 0 is returned on success. A negative errno code is returned 9480 * on a failure to set up the device, or if the name is a duplicate. 9481 * 9482 * This is a wrapper around register_netdevice that takes the rtnl semaphore 9483 * and expands the device name if you passed a format string to 9484 * alloc_netdev. 
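 *
 * A minimal driver-side sketch (editor's illustration; struct my_priv
 * and my_setup() are placeholders):
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d",
 *			   NET_NAME_ENUM, my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);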
9485 */ 9486 int register_netdev(struct net_device *dev) 9487 { 9488 int err; 9489 9490 if (rtnl_lock_killable()) 9491 return -EINTR; 9492 err = register_netdevice(dev); 9493 rtnl_unlock(); 9494 return err; 9495 } 9496 EXPORT_SYMBOL(register_netdev); 9497 9498 int netdev_refcnt_read(const struct net_device *dev) 9499 { 9500 int i, refcnt = 0; 9501 9502 for_each_possible_cpu(i) 9503 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 9504 return refcnt; 9505 } 9506 EXPORT_SYMBOL(netdev_refcnt_read); 9507 9508 /** 9509 * netdev_wait_allrefs - wait until all references are gone. 9510 * @dev: target net_device 9511 * 9512 * This is called when unregistering network devices. 9513 * 9514 * Any protocol or device that holds a reference should register 9515 * for netdevice notification, and cleanup and put back the 9516 * reference if they receive an UNREGISTER event. 9517 * We can get stuck here if buggy protocols don't correctly 9518 * call dev_put. 9519 */ 9520 static void netdev_wait_allrefs(struct net_device *dev) 9521 { 9522 unsigned long rebroadcast_time, warning_time; 9523 int refcnt; 9524 9525 linkwatch_forget_dev(dev); 9526 9527 rebroadcast_time = warning_time = jiffies; 9528 refcnt = netdev_refcnt_read(dev); 9529 9530 while (refcnt != 0) { 9531 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 9532 rtnl_lock(); 9533 9534 /* Rebroadcast unregister notification */ 9535 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 9536 9537 __rtnl_unlock(); 9538 rcu_barrier(); 9539 rtnl_lock(); 9540 9541 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 9542 &dev->state)) { 9543 /* We must not have linkwatch events 9544 * pending on unregister. If this 9545 * happens, we simply run the queue 9546 * unscheduled, resulting in a noop 9547 * for this device. 9548 */ 9549 linkwatch_run_queue(); 9550 } 9551 9552 __rtnl_unlock(); 9553 9554 rebroadcast_time = jiffies; 9555 } 9556 9557 msleep(250); 9558 9559 refcnt = netdev_refcnt_read(dev); 9560 9561 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { 9562 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 9563 dev->name, refcnt); 9564 warning_time = jiffies; 9565 } 9566 } 9567 } 9568 9569 /* The sequence is: 9570 * 9571 * rtnl_lock(); 9572 * ... 9573 * register_netdevice(x1); 9574 * register_netdevice(x2); 9575 * ... 9576 * unregister_netdevice(y1); 9577 * unregister_netdevice(y2); 9578 * ... 9579 * rtnl_unlock(); 9580 * free_netdev(y1); 9581 * free_netdev(y2); 9582 * 9583 * We are invoked by rtnl_unlock(). 9584 * This allows us to deal with problems: 9585 * 1) We can delete sysfs objects which invoke hotplug 9586 * without deadlocking with linkwatch via keventd. 9587 * 2) Since we run with the RTNL semaphore not held, we can sleep 9588 * safely in order to wait for the netdev refcnt to drop to zero. 9589 * 9590 * We must not return until all unregister events added during 9591 * the interval the lock was held have been completed. 
9592 */ 9593 void netdev_run_todo(void) 9594 { 9595 struct list_head list; 9596 9597 /* Snapshot list, allow later requests */ 9598 list_replace_init(&net_todo_list, &list); 9599 9600 __rtnl_unlock(); 9601 9602 9603 /* Wait for rcu callbacks to finish before next phase */ 9604 if (!list_empty(&list)) 9605 rcu_barrier(); 9606 9607 while (!list_empty(&list)) { 9608 struct net_device *dev 9609 = list_first_entry(&list, struct net_device, todo_list); 9610 list_del(&dev->todo_list); 9611 9612 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 9613 pr_err("network todo '%s' but state %d\n", 9614 dev->name, dev->reg_state); 9615 dump_stack(); 9616 continue; 9617 } 9618 9619 dev->reg_state = NETREG_UNREGISTERED; 9620 9621 netdev_wait_allrefs(dev); 9622 9623 /* paranoia */ 9624 BUG_ON(netdev_refcnt_read(dev)); 9625 BUG_ON(!list_empty(&dev->ptype_all)); 9626 BUG_ON(!list_empty(&dev->ptype_specific)); 9627 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 9628 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 9629 #if IS_ENABLED(CONFIG_DECNET) 9630 WARN_ON(dev->dn_ptr); 9631 #endif 9632 if (dev->priv_destructor) 9633 dev->priv_destructor(dev); 9634 if (dev->needs_free_netdev) 9635 free_netdev(dev); 9636 9637 /* Report a network device has been unregistered */ 9638 rtnl_lock(); 9639 dev_net(dev)->dev_unreg_count--; 9640 __rtnl_unlock(); 9641 wake_up(&netdev_unregistering_wq); 9642 9643 /* Free network device */ 9644 kobject_put(&dev->dev.kobj); 9645 } 9646 } 9647 9648 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has 9649 * all the same fields in the same order as net_device_stats, with only 9650 * the type differing, but rtnl_link_stats64 may have additional fields 9651 * at the end for newer counters. 9652 */ 9653 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 9654 const struct net_device_stats *netdev_stats) 9655 { 9656 #if BITS_PER_LONG == 64 9657 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); 9658 memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); 9659 /* zero out counters that only exist in rtnl_link_stats64 */ 9660 memset((char *)stats64 + sizeof(*netdev_stats), 0, 9661 sizeof(*stats64) - sizeof(*netdev_stats)); 9662 #else 9663 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); 9664 const unsigned long *src = (const unsigned long *)netdev_stats; 9665 u64 *dst = (u64 *)stats64; 9666 9667 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); 9668 for (i = 0; i < n; i++) 9669 dst[i] = src[i]; 9670 /* zero out counters that only exist in rtnl_link_stats64 */ 9671 memset((char *)stats64 + n * sizeof(u64), 0, 9672 sizeof(*stats64) - n * sizeof(u64)); 9673 #endif 9674 } 9675 EXPORT_SYMBOL(netdev_stats_to_stats64); 9676 9677 /** 9678 * dev_get_stats - get network device statistics 9679 * @dev: device to get statistics from 9680 * @storage: place to store stats 9681 * 9682 * Get network statistics from device. Return @storage. 9683 * The device driver may provide its own method by setting 9684 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 9685 * otherwise the internal statistics structure is used. 
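 *
 * A minimal usage sketch (editor's illustration):
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu rx packets\n", dev->name, stats.rx_packets);
 *
 * @storage is fully initialized on return, so callers need not zero it.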
9686 */ 9687 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 9688 struct rtnl_link_stats64 *storage) 9689 { 9690 const struct net_device_ops *ops = dev->netdev_ops; 9691 9692 if (ops->ndo_get_stats64) { 9693 memset(storage, 0, sizeof(*storage)); 9694 ops->ndo_get_stats64(dev, storage); 9695 } else if (ops->ndo_get_stats) { 9696 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 9697 } else { 9698 netdev_stats_to_stats64(storage, &dev->stats); 9699 } 9700 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 9701 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 9702 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 9703 return storage; 9704 } 9705 EXPORT_SYMBOL(dev_get_stats); 9706 9707 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 9708 { 9709 struct netdev_queue *queue = dev_ingress_queue(dev); 9710 9711 #ifdef CONFIG_NET_CLS_ACT 9712 if (queue) 9713 return queue; 9714 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 9715 if (!queue) 9716 return NULL; 9717 netdev_init_one_queue(dev, queue, NULL); 9718 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); 9719 queue->qdisc_sleeping = &noop_qdisc; 9720 rcu_assign_pointer(dev->ingress_queue, queue); 9721 #endif 9722 return queue; 9723 } 9724 9725 static const struct ethtool_ops default_ethtool_ops; 9726 9727 void netdev_set_default_ethtool_ops(struct net_device *dev, 9728 const struct ethtool_ops *ops) 9729 { 9730 if (dev->ethtool_ops == &default_ethtool_ops) 9731 dev->ethtool_ops = ops; 9732 } 9733 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 9734 9735 void netdev_freemem(struct net_device *dev) 9736 { 9737 char *addr = (char *)dev - dev->padded; 9738 9739 kvfree(addr); 9740 } 9741 9742 /** 9743 * alloc_netdev_mqs - allocate network device 9744 * @sizeof_priv: size of private data to allocate space for 9745 * @name: device name format string 9746 * @name_assign_type: origin of device name 9747 * @setup: callback to initialize device 9748 * @txqs: the number of TX subqueues to allocate 9749 * @rxqs: the number of RX subqueues to allocate 9750 * 9751 * Allocates a struct net_device with private data area for driver use 9752 * and performs basic initialization. Also allocates subqueue structs 9753 * for each queue on the device. 
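 *
 * A minimal usage sketch (editor's illustration; struct my_priv,
 * my_setup() and the queue counts are placeholders):
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "swp%d",
 *			       NET_NAME_ENUM, my_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);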
9754 */ 9755 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 9756 unsigned char name_assign_type, 9757 void (*setup)(struct net_device *), 9758 unsigned int txqs, unsigned int rxqs) 9759 { 9760 struct net_device *dev; 9761 unsigned int alloc_size; 9762 struct net_device *p; 9763 9764 BUG_ON(strlen(name) >= sizeof(dev->name)); 9765 9766 if (txqs < 1) { 9767 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 9768 return NULL; 9769 } 9770 9771 if (rxqs < 1) { 9772 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 9773 return NULL; 9774 } 9775 9776 alloc_size = sizeof(struct net_device); 9777 if (sizeof_priv) { 9778 /* ensure 32-byte alignment of private area */ 9779 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 9780 alloc_size += sizeof_priv; 9781 } 9782 /* ensure 32-byte alignment of whole construct */ 9783 alloc_size += NETDEV_ALIGN - 1; 9784 9785 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 9786 if (!p) 9787 return NULL; 9788 9789 dev = PTR_ALIGN(p, NETDEV_ALIGN); 9790 dev->padded = (char *)dev - (char *)p; 9791 9792 dev->pcpu_refcnt = alloc_percpu(int); 9793 if (!dev->pcpu_refcnt) 9794 goto free_dev; 9795 9796 if (dev_addr_init(dev)) 9797 goto free_pcpu; 9798 9799 dev_mc_init(dev); 9800 dev_uc_init(dev); 9801 9802 dev_net_set(dev, &init_net); 9803 9804 netdev_register_lockdep_key(dev); 9805 9806 dev->gso_max_size = GSO_MAX_SIZE; 9807 dev->gso_max_segs = GSO_MAX_SEGS; 9808 dev->upper_level = 1; 9809 dev->lower_level = 1; 9810 9811 INIT_LIST_HEAD(&dev->napi_list); 9812 INIT_LIST_HEAD(&dev->unreg_list); 9813 INIT_LIST_HEAD(&dev->close_list); 9814 INIT_LIST_HEAD(&dev->link_watch_list); 9815 INIT_LIST_HEAD(&dev->adj_list.upper); 9816 INIT_LIST_HEAD(&dev->adj_list.lower); 9817 INIT_LIST_HEAD(&dev->ptype_all); 9818 INIT_LIST_HEAD(&dev->ptype_specific); 9819 INIT_LIST_HEAD(&dev->net_notifier_list); 9820 #ifdef CONFIG_NET_SCHED 9821 hash_init(dev->qdisc_hash); 9822 #endif 9823 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 9824 setup(dev); 9825 9826 if (!dev->tx_queue_len) { 9827 dev->priv_flags |= IFF_NO_QUEUE; 9828 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; 9829 } 9830 9831 dev->num_tx_queues = txqs; 9832 dev->real_num_tx_queues = txqs; 9833 if (netif_alloc_netdev_queues(dev)) 9834 goto free_all; 9835 9836 dev->num_rx_queues = rxqs; 9837 dev->real_num_rx_queues = rxqs; 9838 if (netif_alloc_rx_queues(dev)) 9839 goto free_all; 9840 9841 strcpy(dev->name, name); 9842 dev->name_assign_type = name_assign_type; 9843 dev->group = INIT_NETDEV_GROUP; 9844 if (!dev->ethtool_ops) 9845 dev->ethtool_ops = &default_ethtool_ops; 9846 9847 nf_hook_ingress_init(dev); 9848 9849 return dev; 9850 9851 free_all: 9852 free_netdev(dev); 9853 return NULL; 9854 9855 free_pcpu: 9856 free_percpu(dev->pcpu_refcnt); 9857 free_dev: 9858 netdev_freemem(dev); 9859 return NULL; 9860 } 9861 EXPORT_SYMBOL(alloc_netdev_mqs); 9862 9863 /** 9864 * free_netdev - free network device 9865 * @dev: device 9866 * 9867 * This function does the last stage of destroying an allocated device 9868 * interface. The reference to the device object is released. If this 9869 * is the last reference then it will be freed. Must be called in process 9870 * context. 
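 *
 * A typical error-unwind sketch (editor's illustration):
 *
 *	err = register_netdevice(dev);
 *	if (err)
 *		free_netdev(dev);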
9871 */ 9872 void free_netdev(struct net_device *dev) 9873 { 9874 struct napi_struct *p, *n; 9875 9876 might_sleep(); 9877 netif_free_tx_queues(dev); 9878 netif_free_rx_queues(dev); 9879 9880 kfree(rcu_dereference_protected(dev->ingress_queue, 1)); 9881 9882 /* Flush device addresses */ 9883 dev_addr_flush(dev); 9884 9885 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 9886 netif_napi_del(p); 9887 9888 free_percpu(dev->pcpu_refcnt); 9889 dev->pcpu_refcnt = NULL; 9890 free_percpu(dev->xdp_bulkq); 9891 dev->xdp_bulkq = NULL; 9892 9893 netdev_unregister_lockdep_key(dev); 9894 9895 /* Compatibility with error handling in drivers */ 9896 if (dev->reg_state == NETREG_UNINITIALIZED) { 9897 netdev_freemem(dev); 9898 return; 9899 } 9900 9901 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 9902 dev->reg_state = NETREG_RELEASED; 9903 9904 /* will free via device release */ 9905 put_device(&dev->dev); 9906 } 9907 EXPORT_SYMBOL(free_netdev); 9908 9909 /** 9910 * synchronize_net - Synchronize with packet receive processing 9911 * 9912 * Wait for packets currently being received to be done. 9913 * Does not block later packets from starting. 9914 */ 9915 void synchronize_net(void) 9916 { 9917 might_sleep(); 9918 if (rtnl_is_locked()) 9919 synchronize_rcu_expedited(); 9920 else 9921 synchronize_rcu(); 9922 } 9923 EXPORT_SYMBOL(synchronize_net); 9924 9925 /** 9926 * unregister_netdevice_queue - remove device from the kernel 9927 * @dev: device 9928 * @head: list 9929 * 9930 * This function shuts down a device interface and removes it 9931 * from the kernel tables. 9932 * If @head is not NULL, the device is queued to be unregistered later. 9933 * 9934 * Callers must hold the rtnl semaphore. You may want 9935 * unregister_netdev() instead of this. 9936 */ 9937 9938 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) 9939 { 9940 ASSERT_RTNL(); 9941 9942 if (head) { 9943 list_move_tail(&dev->unreg_list, head); 9944 } else { 9945 rollback_registered(dev); 9946 /* Finish processing unregister after unlock */ 9947 net_set_todo(dev); 9948 } 9949 } 9950 EXPORT_SYMBOL(unregister_netdevice_queue); 9951 9952 /** 9953 * unregister_netdevice_many - unregister many devices 9954 * @head: list of devices 9955 * 9956 * Note: As most callers use a stack allocated list_head, 9957 * we force a list_del() to make sure the stack won't be corrupted later. 9958 */ 9959 void unregister_netdevice_many(struct list_head *head) 9960 { 9961 struct net_device *dev; 9962 9963 if (!list_empty(head)) { 9964 rollback_registered_many(head); 9965 list_for_each_entry(dev, head, unreg_list) 9966 net_set_todo(dev); 9967 list_del(head); 9968 } 9969 } 9970 EXPORT_SYMBOL(unregister_netdevice_many); 9971 9972 /** 9973 * unregister_netdev - remove device from the kernel 9974 * @dev: device 9975 * 9976 * This function shuts down a device interface and removes it 9977 * from the kernel tables. 9978 * 9979 * This is just a wrapper for unregister_netdevice that takes 9980 * the rtnl semaphore. In general you want to use this and not 9981 * unregister_netdevice. 9982 */ 9983 void unregister_netdev(struct net_device *dev) 9984 { 9985 rtnl_lock(); 9986 unregister_netdevice(dev); 9987 rtnl_unlock(); 9988 } 9989 EXPORT_SYMBOL(unregister_netdev); 9990 9991 /** 9992 * dev_change_net_namespace - move device to a different network namespace 9993 * @dev: device 9994 * @net: network namespace 9995 * @pat: if not NULL, name pattern to try if the current device name 9996 * is already taken in the destination network namespace. 
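 * For example (editor's illustration; RTNL held, @net holding a valid
 * namespace reference):
 *
 *	err = dev_change_net_namespace(dev, net, "eth%d");
 *
 * retries with the "eth%d" template if dev->name is already taken in
 * the destination namespace.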
9997 * 9998 * This function shuts down a device interface and moves it 9999 * to a new network namespace. On success 0 is returned, on 10000 * a failure a negative errno code is returned. 10001 * 10002 * Callers must hold the rtnl semaphore. 10003 */ 10004 10005 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) 10006 { 10007 int err, new_nsid, new_ifindex; 10008 10009 ASSERT_RTNL(); 10010 10011 /* Don't allow namespace local devices to be moved. */ 10012 err = -EINVAL; 10013 if (dev->features & NETIF_F_NETNS_LOCAL) 10014 goto out; 10015 10016 /* Ensure the device has been registered */ 10017 if (dev->reg_state != NETREG_REGISTERED) 10018 goto out; 10019 10020 /* Get out if there is nothing to do */ 10021 err = 0; 10022 if (net_eq(dev_net(dev), net)) 10023 goto out; 10024 10025 /* Pick the destination device name, and ensure 10026 * we can use it in the destination network namespace. 10027 */ 10028 err = -EEXIST; 10029 if (__dev_get_by_name(net, dev->name)) { 10030 /* We get here if we can't use the current device name */ 10031 if (!pat) 10032 goto out; 10033 err = dev_get_valid_name(net, dev, pat); 10034 if (err < 0) 10035 goto out; 10036 } 10037 10038 /* 10039 * And now a mini version of register_netdevice and unregister_netdevice. 10040 */ 10041 10042 /* If device is running, close it first. */ 10043 dev_close(dev); 10044 10045 /* And unlink it from device chain */ 10046 unlist_netdevice(dev); 10047 10048 synchronize_net(); 10049 10050 /* Shutdown queueing discipline. */ 10051 dev_shutdown(dev); 10052 10053 /* Notify protocols, that we are about to destroy 10054 * this device. They should clean all the things. 10055 * 10056 * Note that dev->reg_state stays at NETREG_REGISTERED. 10057 * This is wanted because this way 8021q and macvlan know 10058 * the device is just moving and can keep their slaves up. 10059 */ 10060 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 10061 rcu_barrier(); 10062 10063 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL); 10064 /* If there is an ifindex conflict assign a new one */ 10065 if (__dev_get_by_index(net, dev->ifindex)) 10066 new_ifindex = dev_new_index(net); 10067 else 10068 new_ifindex = dev->ifindex; 10069 10070 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid, 10071 new_ifindex); 10072 10073 /* 10074 * Flush the unicast and multicast chains 10075 */ 10076 dev_uc_flush(dev); 10077 dev_mc_flush(dev); 10078 10079 /* Send a netdev-removed uevent to the old namespace */ 10080 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); 10081 netdev_adjacent_del_links(dev); 10082 10083 /* Move per-net netdevice notifiers that are following the netdevice */ 10084 move_netdevice_notifiers_dev_net(dev, net); 10085 10086 /* Actually switch the network namespace */ 10087 dev_net_set(dev, net); 10088 dev->ifindex = new_ifindex; 10089 10090 /* Send a netdev-add uevent to the new namespace */ 10091 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 10092 netdev_adjacent_add_links(dev); 10093 10094 /* Fixup kobjects */ 10095 err = device_rename(&dev->dev, dev->name); 10096 WARN_ON(err); 10097 10098 /* Add the device back in the hashes */ 10099 list_netdevice(dev); 10100 10101 /* Notify protocols, that a new device appeared. */ 10102 call_netdevice_notifiers(NETDEV_REGISTER, dev); 10103 10104 /* 10105 * Prevent userspace races by waiting until the network 10106 * device is fully set up before sending notifications. 
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception:
	 * process_backlog() must be called by the CPU owning the percpu
	 * backlog.  We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* Send out pending IPIs on behalf of the offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
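
/* dev_cpu_dead() is not called directly.  It is registered from
 * net_dev_init() below as a CPU hotplug "dead" callback:
 *
 *	cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
 *				  NULL, dev_cpu_dead);
 *
 * so it runs on a surviving CPU once @oldcpu has gone offline, splicing the
 * dead CPU's softnet queues into the local ones and replaying any packets
 * left in its input queues.
 */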
/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask.  Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
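
/* A usage sketch for the printk helpers above and below (the queue index
 * and messages are hypothetical; any code holding a struct net_device can
 * log this way):
 *
 *	netdev_printk(KERN_DEBUG, dev, "queue %d restarted\n", qidx);
 *	netdev_err(dev, "reset failed, disabling device\n");
 *
 * When dev->dev.parent is set, the message is prefixed with the parent
 * driver and device names in addition to the interface name and its
 * registration state.
 */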
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;

	/* Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
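
/* unregister_netdevice_queue() and unregister_netdevice_many(), defined
 * earlier in this file, exist so that callers tearing down many devices
 * pay the unregistration synchronization cost once.  A sketch of the
 * pattern (the my_devs[] array and count are hypothetical):
 *
 *	LIST_HEAD(kill_list);
 *	int i;
 *
 *	rtnl_lock();
 *	for (i = 0; i < count; i++)
 *		unregister_netdevice_queue(my_devs[i], &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 *
 * default_device_exit_batch() below uses exactly this shape for the devices
 * left in a dying network namespace.
 */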
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch()
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch().
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module.  At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices.  This ensures the loopback device is the
	 * first device that appears and the last network device that
	 * disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
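
/* For reference, the pernet_operations pattern used throughout this file is
 * the same hook other subsystems use for per-namespace setup and teardown.
 * A minimal sketch (the my_subsys_* names are hypothetical):
 *
 *	static int __net_init my_subsys_net_init(struct net *net)
 *	{
 *		return 0;
 *	}
 *
 *	static struct pernet_operations my_subsys_net_ops = {
 *		.init = my_subsys_net_init,
 *	};
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		return register_pernet_subsys(&my_subsys_net_ops);
 *	}
 *
 * net_dev_init() itself runs at subsys_initcall time, before device and
 * late initcalls, so the per-CPU softnet state and pernet hooks above are
 * in place before drivers start registering net devices.
 */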