/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
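/*
 * A minimal reader-side sketch of the locking scheme described above
 * (illustrative only, not part of the original file): a pure reader may
 * either take dev_base_lock for reading or walk the list under RCU.
 */
#if 0
struct net_device *dev;

read_lock(&dev_base_lock);
for_each_netdev(net, dev)
        pr_info("found %s\n", dev->name);
read_unlock(&dev_base_lock);

/* or, equivalently, for an RCU reader: */
rcu_read_lock();
for_each_netdev_rcu(net, dev)
        pr_info("found %s\n", dev->name);
rcu_read_unlock();
#endif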
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0)
                ;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a protocol handler that mangles the packet
 *      were first on the list, it could not sense that the packet is
 *      cloned and should be copied-on-write; it would modify the packet
 *      and subsequent readers would see a broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that
 * all CPUs that are in the middle of receiving packets
 * will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
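/*
 * A minimal usage sketch (illustrative only; the handler and variable
 * names are hypothetical): registering a tap on the promiscuous
 * ptype_all chain with dev_add_pack().  A real handler must consume or
 * free the skb and return a NET_RX_* value.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
        .type = cpu_to_be16(ETH_P_ALL), /* all protocols: lands on ptype_all */
        .func = example_rcv,
};

dev_add_pack(&example_pt);
/* ... */
dev_remove_pack(&example_pt);   /* sleeps until no CPU still sees it */
#endif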
/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &packet_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that
 * all CPUs that are in the middle of receiving packets
 * will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;

        spin_lock(&offload_lock);
        list_add_rcu(&po->list, head);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &packet_offload
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &packet_offload is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/*******************************************************************************

                Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds new setup entry to the dev_boot_setup list.  The function
 * returns 0 on error and 1 on success.  This is a generic routine
 * for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found, 1 if they are found.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
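/*
 * An illustrative command-line sketch (not from the original file): the
 * "netdev=" option takes up to four integers followed by a name, e.g.
 *
 *      netdev=5,0x340,0xd0000,0xd4000,eth1
 *
 * which records irq=5, base_addr=0x340, mem_start=0xd0000 and
 * mem_end=0xd4000 for "eth1".  netdev_boot_setup_check() applies these
 * values when a device with that name probes.
 */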
/*******************************************************************************

                Device Interface Subroutines

*******************************************************************************/

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold either the RTNL semaphore
 * or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns %NULL if the device
 * is not found or a pointer to the device. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns NULL if the device
 * is not found or a pointer to the device. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
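/*
 * A minimal sketch of the two lookup flavours (illustrative only): the
 * _rcu variant borrows the pointer, which is valid only inside the RCU
 * read-side section; dev_get_by_name() takes a reference that the caller
 * must drop with dev_put().
 */
#if 0
struct net_device *dev;

rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
        pr_info("%s\n", dev->name);     /* valid only until rcu_read_unlock() */
rcu_read_unlock();

dev = dev_get_by_name(net, "eth0");
if (dev) {
        /* ... use dev; the held reference keeps it alive ... */
        dev_put(dev);
}
#endif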
/**
 * netdev_get_name - get a netdevice name, knowing its ifindex.
 * @net: network namespace
 * @name: a pointer to the buffer where the name will be stored.
 * @ifindex: the ifindex of the interface to get the name from.
 *
 * The use of raw_seqcount_begin() and cond_resched() before
 * retrying is required as we want to give the writers a chance
 * to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                cond_resched();
                goto retry;
        }

        return 0;
}

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns NULL if the device
 * is not found or a pointer to the device.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking
 */
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * dev_get_by_flags_rcu - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns NULL if a device
 * is not found or a pointer to the device. Must be called inside
 * rcu_read_lock(), and result refcount is unchanged.
 */
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
                                        unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
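/*
 * A minimal sketch (illustrative only; the address is hypothetical):
 * looking up an Ethernet device by MAC address.  The result is borrowed
 * and stable only while rcu_read_lock() is held.
 */
#if 0
static const char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
struct net_device *dev;

rcu_read_lock();
dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
if (dev)
        netdev_info(dev, "matched by hardware address\n");
rcu_read_unlock();
#endif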
/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work.  We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf: scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" it will try and find a suitable
 * id. It scans list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8 * PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ - 1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be exactly one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}
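/*
 * A minimal sketch (illustrative only): asking for the first free
 * "eth%d" slot under RTNL.  On success the unit number is returned and
 * dev->name has been rewritten in place, e.g. to "eth3".
 */
#if 0
int unit;

rtnl_lock();
unit = dev_alloc_name(dev, "eth%d");
if (unit < 0)
        pr_err("no free eth%%d slot: %d\n", unit);
rtnl_unlock();
#endif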
/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" it will try and find a suitable
 * id. It scans list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change name of a device; a format string such as "eth%d"
 * can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        write_seqcount_begin(&devnet_rename_seq);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                write_seqcount_end(&devnet_rename_seq);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                write_seqcount_end(&devnet_rename_seq);
                return err;
        }

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }

        write_seqcount_end(&devnet_rename_seq);

        netdev_adjacent_rename_links(dev, oldname);

        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        char *new_ifalias;

        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                kfree(dev->ifalias);
                dev->ifalias = NULL;
                return 0;
        }

        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;

        strlcpy(dev->ifalias, alias, len + 1);
        return len;
}

/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        if (!netif_device_present(dev))
                return -ENODEV;

        /* Block netpoll from trying to do any rx path servicing.
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
        netpoll_rx_disable(dev);

        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        netpoll_rx_enable(dev);

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                dev->flags |= IFF_UP;
                net_dmaengine_get();
                dev_set_rx_mode(dev);
                dev_activate(dev);
                add_device_randomness(dev->dev_addr, dev->addr_len);
        }

        return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        int ret;

        if (dev->flags & IFF_UP)
                return 0;

        ret = __dev_open(dev);
        if (ret < 0)
                return ret;

        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL);
        call_netdevice_notifiers(NETDEV_UP, dev);

        return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
        struct net_device *dev;

        ASSERT_RTNL();
        might_sleep();

        list_for_each_entry(dev, head, close_list) {
                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

                clear_bit(__LINK_STATE_START, &dev->state);

                /* Synchronize to scheduled poll. We cannot touch poll list, it
                 * can be even on different cpu. So just clear netif_running().
                 *
                 * dev->stop() will invoke napi_disable() on all of its
                 * napi_struct instances on this device.
                 */
                smp_mb__after_clear_bit(); /* Commit netif_running(). */
        }

        dev_deactivate_many(head);

        list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;

                /*
                 * Call the device specific close. This cannot fail.
                 * Only if device is UP
                 *
                 * We allow it to be called even after a DETACH hot-plug
                 * event.
                 */
                if (ops->ndo_stop)
                        ops->ndo_stop(dev);

                dev->flags &= ~IFF_UP;
                net_dmaengine_put();
        }

        return 0;
}

static int __dev_close(struct net_device *dev)
{
        int retval;
        LIST_HEAD(single);

        /* Temporarily disable netpoll until the interface is down */
        netpoll_rx_disable(dev);

        list_add(&dev->close_list, &single);
        retval = __dev_close_many(&single);
        list_del(&single);

        netpoll_rx_enable(dev);
        return retval;
}

static int dev_close_many(struct list_head *head)
{
        struct net_device *dev, *tmp;

        /* Remove the devices that don't need to be closed */
        list_for_each_entry_safe(dev, tmp, head, close_list)
                if (!(dev->flags & IFF_UP))
                        list_del_init(&dev->close_list);

        __dev_close_many(head);

        list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
                list_del_init(&dev->close_list);
        }

        return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);

                /* Block netpoll rx while the interface is going down */
                netpoll_rx_disable(dev);

                list_add(&dev->close_list, &single);
                dev_close_many(&single);
                list_del(&single);

                netpoll_rx_enable(dev);
        }
        return 0;
}
EXPORT_SYMBOL(dev_close);
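/*
 * A minimal sketch (illustrative only): administratively cycling an
 * interface.  Both calls require RTNL; dev_open() on an already-up
 * device and dev_close() on an already-down device are no-ops.
 */
#if 0
int err;

rtnl_lock();
err = dev_open(dev);
if (err)
        netdev_err(dev, "dev_open failed: %d\n", err);
else
        dev_close(dev);
rtnl_unlock();
#endif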
/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device.  Must be
 * called under RTNL.  This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        /*
         * If we're trying to disable lro on a vlan device
         * use the underlying physical device instead
         */
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);

        /* the same for macvlan devices */
        if (netif_is_macvlan(dev))
                dev = macvlan_dev_real_dev(dev);

        dev->wanted_features &= ~NETIF_F_LRO;
        netdev_update_features(dev);

        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
                                   struct net_device *dev)
{
        struct netdev_notifier_info info;

        netdev_notifier_info_init(&info, dev);
        return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it a race-free
 * view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        call_netdevice_notifier(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                goto outroll;

                        if (dev->flags & IFF_UP) {
                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
                                                        dev);
                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }

outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
        if (err)
                goto unlock;

        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev->flags & IFF_UP) {
                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
                                                        dev);
                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
                        }
                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
                }
        }
unlock:
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks.  Parameters and return value
 * are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_info(unsigned long val,
                                         struct net_device *dev,
                                         struct netdev_notifier_info *info)
{
        ASSERT_RTNL();
        netdev_notifier_info_init(info, dev);
        return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks.  Parameters and return value
 * are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
        struct netdev_notifier_info info;

        return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
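/*
 * A minimal notifier sketch (illustrative only; names are hypothetical).
 * As documented above, registration replays NETDEV_REGISTER/NETDEV_UP
 * for existing devices, so the callback sees a race-free device list.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UP)
                netdev_info(dev, "is up\n");
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
        .notifier_call = example_netdev_event,
};

register_netdevice_notifier(&example_netdev_nb);
/* ... */
unregister_netdevice_notifier(&example_netdev_nb);
#endif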
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

        if (deferred) {
                while (--deferred)
                        static_key_slow_dec(&netstamp_needed);
                return;
        }
#endif
        static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
        if (in_interrupt()) {
                atomic_inc(&netstamp_needed_deferred);
                return;
        }
#endif
        static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
        skb->tstamp.tv64 = 0;
        if (static_key_false(&netstamp_needed))
                __net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)                  \
        if (static_key_false(&netstamp_needed)) {       \
                if ((COND) && !(SKB)->tstamp.tv64)      \
                        __net_timestamp(SKB);           \
        }                                               \

static inline bool is_skb_forwardable(struct net_device *dev,
                                      struct sk_buff *skb)
{
        unsigned int len;

        if (!(dev->flags & IFF_UP))
                return false;

        len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
        if (skb->len <= len)
                return true;

        /* if TSO is enabled, we don't care about the length as the packet
         * could be forwarded without being segmented before
         */
        if (skb_is_gso(skb))
                return true;

        return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
                        atomic_long_inc(&dev->rx_dropped);
                        kfree_skb(skb);
                        return NET_RX_DROP;
                }
        }

        if (unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        skb_scrub_packet(skb, true);
        skb->protocol = eth_type_trans(skb, dev);

        return netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
                              struct packet_type *pt_prev,
                              struct net_device *orig_dev)
{
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
                return -ENOMEM;
        atomic_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
        if (!ptype->af_packet_priv || !skb->sk)
                return false;

        if (ptype->id_match)
                return ptype->id_match(ptype, skb->sk);
        else if ((struct sock *)ptype->af_packet_priv == skb->sk)
                return true;

        return false;
}
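/*
 * A minimal sketch (illustrative only; "example_get_peer" is
 * hypothetical): a veth-style driver handing a transmitted frame to its
 * peer's receive path via dev_forward_skb().
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net_device *peer = example_get_peer(dev);

        if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
                dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}
#endif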
/*
 *      Support routine. Sends outgoing frames to any network
 *      taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
        struct packet_type *ptype;
        struct sk_buff *skb2 = NULL;
        struct packet_type *pt_prev = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
                    (!skb_loop_sk(ptype, skb))) {
                        if (pt_prev) {
                                deliver_skb(skb2, pt_prev, skb->dev);
                                pt_prev = ptype;
                                continue;
                        }

                        skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (!skb2)
                                break;

                        net_timestamp_set(skb2);

                        /* skb->nh should be correctly
                         * set by sender, so that the second statement is
                         * just protection against buggy protocols.
                         */
                        skb_reset_mac_header(skb2);

                        if (skb_network_header(skb2) < skb2->data ||
                            skb_network_header(skb2) > skb_tail_pointer(skb2)) {
                                net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
                                                     ntohs(skb2->protocol),
                                                     dev->name);
                                skb_reset_network_header(skb2);
                        }

                        skb2->transport_header = skb2->network_header;
                        skb2->pkt_type = PACKET_OUTGOING;
                        pt_prev = ptype;
                }
        }
        if (pt_prev)
                pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
        rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, null the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid; nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
        int i;
        struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

        /* If TC0 is invalidated disable TC mapping */
        if (tc->offset + tc->count > txq) {
                pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
                dev->num_tc = 0;
                return;
        }

        /* Invalidated prio to tc mappings set to TC0 */
        for (i = 1; i < TC_BITMASK + 1; i++) {
                int q = netdev_get_prio_tc_map(dev, i);

                tc = &dev->tc_to_txq[q];
                if (tc->offset + tc->count > txq) {
                        pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
                                i, q);
                        netdev_set_prio_tc_map(dev, i, 0);
                }
        }
}
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)             \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
                                        int cpu, u16 index)
{
        struct xps_map *map = NULL;
        int pos;

        if (dev_maps)
                map = xmap_dereference(dev_maps->cpu_map[cpu]);

        for (pos = 0; map && pos < map->len; pos++) {
                if (map->queues[pos] == index) {
                        if (map->len > 1) {
                                map->queues[pos] = map->queues[--map->len];
                        } else {
                                RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
                                kfree_rcu(map, rcu);
                                map = NULL;
                        }
                        break;
                }
        }

        return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
        struct xps_dev_maps *dev_maps;
        int cpu, i;
        bool active = false;

        mutex_lock(&xps_map_mutex);
        dev_maps = xmap_dereference(dev->xps_maps);

        if (!dev_maps)
                goto out_no_maps;

        for_each_possible_cpu(cpu) {
                for (i = index; i < dev->num_tx_queues; i++) {
                        if (!remove_xps_queue(dev_maps, cpu, i))
                                break;
                }
                if (i == dev->num_tx_queues)
                        active = true;
        }

        if (!active) {
                RCU_INIT_POINTER(dev->xps_maps, NULL);
                kfree_rcu(dev_maps, rcu);
        }

        for (i = index; i < dev->num_tx_queues; i++)
                netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
                                             NUMA_NO_NODE);

out_no_maps:
        mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
                                      int cpu, u16 index)
{
        struct xps_map *new_map;
        int alloc_len = XPS_MIN_MAP_ALLOC;
        int i, pos;

        for (pos = 0; map && pos < map->len; pos++) {
                if (map->queues[pos] != index)
                        continue;
                return map;
        }

        /* Need to add queue to this CPU's existing map */
        if (map) {
                if (pos < map->alloc_len)
                        return map;

                alloc_len = map->alloc_len * 2;
        }

        /* Need to allocate new map to store queue on this CPU's map */
        new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
                               cpu_to_node(cpu));
        if (!new_map)
                return NULL;

        for (i = 0; i < pos; i++)
                new_map->queues[i] = map->queues[i];
        new_map->alloc_len = alloc_len;
        new_map->len = pos;

        return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
                        u16 index)
{
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
        struct xps_map *map, *new_map;
        int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
        int cpu, numa_node_id = -2;
        bool active = false;

        mutex_lock(&xps_map_mutex);

        dev_maps = xmap_dereference(dev->xps_maps);

        /* allocate memory for queue storage */
        for_each_online_cpu(cpu) {
                if (!cpumask_test_cpu(cpu, mask))
                        continue;

                if (!new_dev_maps)
                        new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
                if (!new_dev_maps) {
                        mutex_unlock(&xps_map_mutex);
                        return -ENOMEM;
                }
xmap_dereference(dev_maps->cpu_map[cpu]) :
1950 NULL;
1951
1952 map = expand_xps_map(map, cpu, index);
1953 if (!map)
1954 goto error;
1955
1956 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1957 }
1958
1959 if (!new_dev_maps)
1960 goto out_no_new_maps;
1961
1962 for_each_possible_cpu(cpu) {
1963 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1964 /* add queue to CPU maps */
1965 int pos = 0;
1966
1967 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1968 while ((pos < map->len) && (map->queues[pos] != index))
1969 pos++;
1970
1971 if (pos == map->len)
1972 map->queues[map->len++] = index;
1973 #ifdef CONFIG_NUMA
1974 if (numa_node_id == -2)
1975 numa_node_id = cpu_to_node(cpu);
1976 else if (numa_node_id != cpu_to_node(cpu))
1977 numa_node_id = -1;
1978 #endif
1979 } else if (dev_maps) {
1980 /* fill in the new device map from the old device map */
1981 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1982 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1983 }
1984
1985 }
1986
1987 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1988
1989 /* Cleanup old maps */
1990 if (dev_maps) {
1991 for_each_possible_cpu(cpu) {
1992 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1993 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1994 if (map && map != new_map)
1995 kfree_rcu(map, rcu);
1996 }
1997
1998 kfree_rcu(dev_maps, rcu);
1999 }
2000
2001 dev_maps = new_dev_maps;
2002 active = true;
2003
2004 out_no_new_maps:
2005 /* update Tx queue numa node */
2006 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2007 (numa_node_id >= 0) ? numa_node_id :
2008 NUMA_NO_NODE);
2009
2010 if (!dev_maps)
2011 goto out_no_maps;
2012
2013 /* Remove the queue from CPUs that are no longer in the mask */
2014 for_each_possible_cpu(cpu) {
2015 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2016 continue;
2017
2018 if (remove_xps_queue(dev_maps, cpu, index))
2019 active = true;
2020 }
2021
2022 /* free map if not active */
2023 if (!active) {
2024 RCU_INIT_POINTER(dev->xps_maps, NULL);
2025 kfree_rcu(dev_maps, rcu);
2026 }
2027
2028 out_no_maps:
2029 mutex_unlock(&xps_map_mutex);
2030
2031 return 0;
2032 error:
2033 /* remove any maps that we added */
2034 for_each_possible_cpu(cpu) {
2035 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2036 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2037 NULL;
2038 if (new_map && new_map != map)
2039 kfree(new_map);
2040 }
2041
2042 mutex_unlock(&xps_map_mutex);
2043
2044 kfree(new_dev_maps);
2045 return -ENOMEM;
2046 }
2047 EXPORT_SYMBOL(netif_set_xps_queue);
2048
2049 #endif
2050 /*
2051 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2052 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
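 *
 * A minimal usage sketch (illustrative only, not taken from an in-tree
 * driver; "priv->hw_txq_count" stands for a hypothetical driver field)
 * for a driver that discovers at runtime that fewer TX queues are
 * usable:
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(netdev, priv->hw_txq_count);
 *	rtnl_unlock();
 *	if (err)
 *		netdev_err(netdev, "cannot shrink TX queues: %d\n", err);
 *
 * RTNL is required once the device is registered, as the ASSERT_RTNL()
 * in the function below enforces.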
2053 */ 2054 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2055 { 2056 int rc; 2057 2058 if (txq < 1 || txq > dev->num_tx_queues) 2059 return -EINVAL; 2060 2061 if (dev->reg_state == NETREG_REGISTERED || 2062 dev->reg_state == NETREG_UNREGISTERING) { 2063 ASSERT_RTNL(); 2064 2065 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2066 txq); 2067 if (rc) 2068 return rc; 2069 2070 if (dev->num_tc) 2071 netif_setup_tc(dev, txq); 2072 2073 if (txq < dev->real_num_tx_queues) { 2074 qdisc_reset_all_tx_gt(dev, txq); 2075 #ifdef CONFIG_XPS 2076 netif_reset_xps_queues_gt(dev, txq); 2077 #endif 2078 } 2079 } 2080 2081 dev->real_num_tx_queues = txq; 2082 return 0; 2083 } 2084 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2085 2086 #ifdef CONFIG_SYSFS 2087 /** 2088 * netif_set_real_num_rx_queues - set actual number of RX queues used 2089 * @dev: Network device 2090 * @rxq: Actual number of RX queues 2091 * 2092 * This must be called either with the rtnl_lock held or before 2093 * registration of the net device. Returns 0 on success, or a 2094 * negative error code. If called before registration, it always 2095 * succeeds. 2096 */ 2097 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2098 { 2099 int rc; 2100 2101 if (rxq < 1 || rxq > dev->num_rx_queues) 2102 return -EINVAL; 2103 2104 if (dev->reg_state == NETREG_REGISTERED) { 2105 ASSERT_RTNL(); 2106 2107 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2108 rxq); 2109 if (rc) 2110 return rc; 2111 } 2112 2113 dev->real_num_rx_queues = rxq; 2114 return 0; 2115 } 2116 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2117 #endif 2118 2119 /** 2120 * netif_get_num_default_rss_queues - default number of RSS queues 2121 * 2122 * This routine should set an upper limit on the number of RSS queues 2123 * used by default by multiqueue devices. 
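 *
 * Hypothetical probe-time usage ("hw_max_queues" is an assumed
 * device-specific limit, not something defined in this file):
 *
 *	num_rxq = min_t(int, hw_max_queues,
 *			netif_get_num_default_rss_queues());
 *	err = netif_set_real_num_rx_queues(netdev, num_rxq);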
2124 */ 2125 int netif_get_num_default_rss_queues(void) 2126 { 2127 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2128 } 2129 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2130 2131 static inline void __netif_reschedule(struct Qdisc *q) 2132 { 2133 struct softnet_data *sd; 2134 unsigned long flags; 2135 2136 local_irq_save(flags); 2137 sd = &__get_cpu_var(softnet_data); 2138 q->next_sched = NULL; 2139 *sd->output_queue_tailp = q; 2140 sd->output_queue_tailp = &q->next_sched; 2141 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2142 local_irq_restore(flags); 2143 } 2144 2145 void __netif_schedule(struct Qdisc *q) 2146 { 2147 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2148 __netif_reschedule(q); 2149 } 2150 EXPORT_SYMBOL(__netif_schedule); 2151 2152 struct dev_kfree_skb_cb { 2153 enum skb_free_reason reason; 2154 }; 2155 2156 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 2157 { 2158 return (struct dev_kfree_skb_cb *)skb->cb; 2159 } 2160 2161 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 2162 { 2163 unsigned long flags; 2164 2165 if (likely(atomic_read(&skb->users) == 1)) { 2166 smp_rmb(); 2167 atomic_set(&skb->users, 0); 2168 } else if (likely(!atomic_dec_and_test(&skb->users))) { 2169 return; 2170 } 2171 get_kfree_skb_cb(skb)->reason = reason; 2172 local_irq_save(flags); 2173 skb->next = __this_cpu_read(softnet_data.completion_queue); 2174 __this_cpu_write(softnet_data.completion_queue, skb); 2175 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2176 local_irq_restore(flags); 2177 } 2178 EXPORT_SYMBOL(__dev_kfree_skb_irq); 2179 2180 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 2181 { 2182 if (in_irq() || irqs_disabled()) 2183 __dev_kfree_skb_irq(skb, reason); 2184 else 2185 dev_kfree_skb(skb); 2186 } 2187 EXPORT_SYMBOL(__dev_kfree_skb_any); 2188 2189 2190 /** 2191 * netif_device_detach - mark device as removed 2192 * @dev: network device 2193 * 2194 * Mark device as removed from system and therefore no longer available. 2195 */ 2196 void netif_device_detach(struct net_device *dev) 2197 { 2198 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 2199 netif_running(dev)) { 2200 netif_tx_stop_all_queues(dev); 2201 } 2202 } 2203 EXPORT_SYMBOL(netif_device_detach); 2204 2205 /** 2206 * netif_device_attach - mark device as attached 2207 * @dev: network device 2208 * 2209 * Mark device as attached from system and restart if needed. 2210 */ 2211 void netif_device_attach(struct net_device *dev) 2212 { 2213 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 2214 netif_running(dev)) { 2215 netif_tx_wake_all_queues(dev); 2216 __netdev_watchdog_up(dev); 2217 } 2218 } 2219 EXPORT_SYMBOL(netif_device_attach); 2220 2221 static void skb_warn_bad_offload(const struct sk_buff *skb) 2222 { 2223 static const netdev_features_t null_features = 0; 2224 struct net_device *dev = skb->dev; 2225 const char *driver = ""; 2226 2227 if (!net_ratelimit()) 2228 return; 2229 2230 if (dev && dev->dev.parent) 2231 driver = dev_driver_string(dev->dev.parent); 2232 2233 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2234 "gso_type=%d ip_summed=%d\n", 2235 driver, dev ? &dev->features : &null_features, 2236 skb->sk ? 
&skb->sk->sk_route_caps : &null_features, 2237 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2238 skb_shinfo(skb)->gso_type, skb->ip_summed); 2239 } 2240 2241 /* 2242 * Invalidate hardware checksum when packet is to be mangled, and 2243 * complete checksum manually on outgoing path. 2244 */ 2245 int skb_checksum_help(struct sk_buff *skb) 2246 { 2247 __wsum csum; 2248 int ret = 0, offset; 2249 2250 if (skb->ip_summed == CHECKSUM_COMPLETE) 2251 goto out_set_summed; 2252 2253 if (unlikely(skb_shinfo(skb)->gso_size)) { 2254 skb_warn_bad_offload(skb); 2255 return -EINVAL; 2256 } 2257 2258 /* Before computing a checksum, we should make sure no frag could 2259 * be modified by an external entity : checksum could be wrong. 2260 */ 2261 if (skb_has_shared_frag(skb)) { 2262 ret = __skb_linearize(skb); 2263 if (ret) 2264 goto out; 2265 } 2266 2267 offset = skb_checksum_start_offset(skb); 2268 BUG_ON(offset >= skb_headlen(skb)); 2269 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2270 2271 offset += skb->csum_offset; 2272 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2273 2274 if (skb_cloned(skb) && 2275 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 2276 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2277 if (ret) 2278 goto out; 2279 } 2280 2281 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2282 out_set_summed: 2283 skb->ip_summed = CHECKSUM_NONE; 2284 out: 2285 return ret; 2286 } 2287 EXPORT_SYMBOL(skb_checksum_help); 2288 2289 __be16 skb_network_protocol(struct sk_buff *skb) 2290 { 2291 __be16 type = skb->protocol; 2292 int vlan_depth = ETH_HLEN; 2293 2294 /* Tunnel gso handlers can set protocol to ethernet. */ 2295 if (type == htons(ETH_P_TEB)) { 2296 struct ethhdr *eth; 2297 2298 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 2299 return 0; 2300 2301 eth = (struct ethhdr *)skb_mac_header(skb); 2302 type = eth->h_proto; 2303 } 2304 2305 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { 2306 struct vlan_hdr *vh; 2307 2308 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2309 return 0; 2310 2311 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2312 type = vh->h_vlan_encapsulated_proto; 2313 vlan_depth += VLAN_HLEN; 2314 } 2315 2316 return type; 2317 } 2318 2319 /** 2320 * skb_mac_gso_segment - mac layer segmentation handler. 2321 * @skb: buffer to segment 2322 * @features: features for the output path (see dev->features) 2323 */ 2324 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 2325 netdev_features_t features) 2326 { 2327 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2328 struct packet_offload *ptype; 2329 __be16 type = skb_network_protocol(skb); 2330 2331 if (unlikely(!type)) 2332 return ERR_PTR(-EINVAL); 2333 2334 __skb_pull(skb, skb->mac_len); 2335 2336 rcu_read_lock(); 2337 list_for_each_entry_rcu(ptype, &offload_base, list) { 2338 if (ptype->type == type && ptype->callbacks.gso_segment) { 2339 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2340 int err; 2341 2342 err = ptype->callbacks.gso_send_check(skb); 2343 segs = ERR_PTR(err); 2344 if (err || skb_gso_ok(skb, features)) 2345 break; 2346 __skb_push(skb, (skb->data - 2347 skb_network_header(skb))); 2348 } 2349 segs = ptype->callbacks.gso_segment(skb, features); 2350 break; 2351 } 2352 } 2353 rcu_read_unlock(); 2354 2355 __skb_push(skb, skb->data - skb_mac_header(skb)); 2356 2357 return segs; 2358 } 2359 EXPORT_SYMBOL(skb_mac_gso_segment); 2360 2361 2362 /* openvswitch calls this on rx path, so we need a different check. 
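 *
 * Concretely (these values follow from the tests in the function
 * below): an skb with ip_summed == CHECKSUM_UNNECESSARY has its
 * offload claims verified on the tx path, where anything other than
 * CHECKSUM_PARTIAL is suspect, but not on the rx path, where only
 * CHECKSUM_NONE triggers verification.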
2363 */ 2364 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 2365 { 2366 if (tx_path) 2367 return skb->ip_summed != CHECKSUM_PARTIAL; 2368 else 2369 return skb->ip_summed == CHECKSUM_NONE; 2370 } 2371 2372 /** 2373 * __skb_gso_segment - Perform segmentation on skb. 2374 * @skb: buffer to segment 2375 * @features: features for the output path (see dev->features) 2376 * @tx_path: whether it is called in TX path 2377 * 2378 * This function segments the given skb and returns a list of segments. 2379 * 2380 * It may return NULL if the skb requires no segmentation. This is 2381 * only possible when GSO is used for verifying header integrity. 2382 */ 2383 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2384 netdev_features_t features, bool tx_path) 2385 { 2386 if (unlikely(skb_needs_check(skb, tx_path))) { 2387 int err; 2388 2389 skb_warn_bad_offload(skb); 2390 2391 if (skb_header_cloned(skb) && 2392 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 2393 return ERR_PTR(err); 2394 } 2395 2396 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 2397 SKB_GSO_CB(skb)->encap_level = 0; 2398 2399 skb_reset_mac_header(skb); 2400 skb_reset_mac_len(skb); 2401 2402 return skb_mac_gso_segment(skb, features); 2403 } 2404 EXPORT_SYMBOL(__skb_gso_segment); 2405 2406 /* Take action when hardware reception checksum errors are detected. */ 2407 #ifdef CONFIG_BUG 2408 void netdev_rx_csum_fault(struct net_device *dev) 2409 { 2410 if (net_ratelimit()) { 2411 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 2412 dump_stack(); 2413 } 2414 } 2415 EXPORT_SYMBOL(netdev_rx_csum_fault); 2416 #endif 2417 2418 /* Actually, we should eliminate this check as soon as we know, that: 2419 * 1. IOMMU is present and allows to map all the memory. 2420 * 2. No high memory really exists on this machine. 2421 */ 2422 2423 static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) 2424 { 2425 #ifdef CONFIG_HIGHMEM 2426 int i; 2427 if (!(dev->features & NETIF_F_HIGHDMA)) { 2428 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2429 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2430 if (PageHighMem(skb_frag_page(frag))) 2431 return 1; 2432 } 2433 } 2434 2435 if (PCI_DMA_BUS_IS_PHYS) { 2436 struct device *pdev = dev->dev.parent; 2437 2438 if (!pdev) 2439 return 0; 2440 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2441 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2442 dma_addr_t addr = page_to_phys(skb_frag_page(frag)); 2443 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2444 return 1; 2445 } 2446 } 2447 #endif 2448 return 0; 2449 } 2450 2451 struct dev_gso_cb { 2452 void (*destructor)(struct sk_buff *skb); 2453 }; 2454 2455 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 2456 2457 static void dev_gso_skb_destructor(struct sk_buff *skb) 2458 { 2459 struct dev_gso_cb *cb; 2460 2461 kfree_skb_list(skb->next); 2462 skb->next = NULL; 2463 2464 cb = DEV_GSO_CB(skb); 2465 if (cb->destructor) 2466 cb->destructor(skb); 2467 } 2468 2469 /** 2470 * dev_gso_segment - Perform emulated hardware segmentation on skb. 2471 * @skb: buffer to segment 2472 * @features: device features as applicable to this skb 2473 * 2474 * This function segments the given skb and stores the list of segments 2475 * in skb->next. 2476 */ 2477 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) 2478 { 2479 struct sk_buff *segs; 2480 2481 segs = skb_gso_segment(skb, features); 2482 2483 /* Verifying header integrity only. 
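 *
 * On success the segments are chained through skb->next, e.g. for a
 * payload spanning three MSS-sized pieces (illustrative):
 *
 *	skb -> seg1 -> seg2 -> seg3 -> NULL
 *
 * dev_gso_skb_destructor() above frees this chain if the original skb
 * is destroyed before all segments have been transmitted.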
*/ 2484 if (!segs) 2485 return 0; 2486 2487 if (IS_ERR(segs)) 2488 return PTR_ERR(segs); 2489 2490 skb->next = segs; 2491 DEV_GSO_CB(skb)->destructor = skb->destructor; 2492 skb->destructor = dev_gso_skb_destructor; 2493 2494 return 0; 2495 } 2496 2497 static netdev_features_t harmonize_features(struct sk_buff *skb, 2498 const struct net_device *dev, 2499 netdev_features_t features) 2500 { 2501 if (skb->ip_summed != CHECKSUM_NONE && 2502 !can_checksum_protocol(features, skb_network_protocol(skb))) { 2503 features &= ~NETIF_F_ALL_CSUM; 2504 } else if (illegal_highdma(dev, skb)) { 2505 features &= ~NETIF_F_SG; 2506 } 2507 2508 return features; 2509 } 2510 2511 netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 2512 const struct net_device *dev) 2513 { 2514 __be16 protocol = skb->protocol; 2515 netdev_features_t features = dev->features; 2516 2517 if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) 2518 features &= ~NETIF_F_GSO_MASK; 2519 2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2522 protocol = veh->h_vlan_encapsulated_proto; 2523 } else if (!vlan_tx_tag_present(skb)) { 2524 return harmonize_features(skb, dev, features); 2525 } 2526 2527 features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2528 NETIF_F_HW_VLAN_STAG_TX); 2529 2530 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) 2531 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 2532 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2533 NETIF_F_HW_VLAN_STAG_TX; 2534 2535 return harmonize_features(skb, dev, features); 2536 } 2537 EXPORT_SYMBOL(netif_skb_dev_features); 2538 2539 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2540 struct netdev_queue *txq) 2541 { 2542 const struct net_device_ops *ops = dev->netdev_ops; 2543 int rc = NETDEV_TX_OK; 2544 unsigned int skb_len; 2545 2546 if (likely(!skb->next)) { 2547 netdev_features_t features; 2548 2549 /* 2550 * If device doesn't need skb->dst, release it right now while 2551 * its hot in this cpu cache 2552 */ 2553 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2554 skb_dst_drop(skb); 2555 2556 features = netif_skb_features(skb); 2557 2558 if (vlan_tx_tag_present(skb) && 2559 !vlan_hw_offload_capable(features, skb->vlan_proto)) { 2560 skb = __vlan_put_tag(skb, skb->vlan_proto, 2561 vlan_tx_tag_get(skb)); 2562 if (unlikely(!skb)) 2563 goto out; 2564 2565 skb->vlan_tci = 0; 2566 } 2567 2568 /* If encapsulation offload request, verify we are testing 2569 * hardware encapsulation features instead of standard 2570 * features for the netdev 2571 */ 2572 if (skb->encapsulation) 2573 features &= dev->hw_enc_features; 2574 2575 if (netif_needs_gso(skb, features)) { 2576 if (unlikely(dev_gso_segment(skb, features))) 2577 goto out_kfree_skb; 2578 if (skb->next) 2579 goto gso; 2580 } else { 2581 if (skb_needs_linearize(skb, features) && 2582 __skb_linearize(skb)) 2583 goto out_kfree_skb; 2584 2585 /* If packet is not checksummed and device does not 2586 * support checksumming for this protocol, complete 2587 * checksumming here. 
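 *
 * Illustrative case (assumed capabilities): a NIC advertising only
 * NETIF_F_IP_CSUM is handed an IPv6 TCP skb. harmonize_features()
 * above clears the checksum feature bits for the unsupported protocol,
 * so (features & NETIF_F_ALL_CSUM) is zero and skb_checksum_help()
 * finalizes the checksum in software before ndo_start_xmit() runs.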
2588 */ 2589 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2590 if (skb->encapsulation) 2591 skb_set_inner_transport_header(skb, 2592 skb_checksum_start_offset(skb)); 2593 else 2594 skb_set_transport_header(skb, 2595 skb_checksum_start_offset(skb)); 2596 if (!(features & NETIF_F_ALL_CSUM) && 2597 skb_checksum_help(skb)) 2598 goto out_kfree_skb; 2599 } 2600 } 2601 2602 if (!list_empty(&ptype_all)) 2603 dev_queue_xmit_nit(skb, dev); 2604 2605 skb_len = skb->len; 2606 trace_net_dev_start_xmit(skb, dev); 2607 rc = ops->ndo_start_xmit(skb, dev); 2608 trace_net_dev_xmit(skb, rc, dev, skb_len); 2609 if (rc == NETDEV_TX_OK) 2610 txq_trans_update(txq); 2611 return rc; 2612 } 2613 2614 gso: 2615 do { 2616 struct sk_buff *nskb = skb->next; 2617 2618 skb->next = nskb->next; 2619 nskb->next = NULL; 2620 2621 if (!list_empty(&ptype_all)) 2622 dev_queue_xmit_nit(nskb, dev); 2623 2624 skb_len = nskb->len; 2625 trace_net_dev_start_xmit(nskb, dev); 2626 rc = ops->ndo_start_xmit(nskb, dev); 2627 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2628 if (unlikely(rc != NETDEV_TX_OK)) { 2629 if (rc & ~NETDEV_TX_MASK) 2630 goto out_kfree_gso_skb; 2631 nskb->next = skb->next; 2632 skb->next = nskb; 2633 return rc; 2634 } 2635 txq_trans_update(txq); 2636 if (unlikely(netif_xmit_stopped(txq) && skb->next)) 2637 return NETDEV_TX_BUSY; 2638 } while (skb->next); 2639 2640 out_kfree_gso_skb: 2641 if (likely(skb->next == NULL)) { 2642 skb->destructor = DEV_GSO_CB(skb)->destructor; 2643 consume_skb(skb); 2644 return rc; 2645 } 2646 out_kfree_skb: 2647 kfree_skb(skb); 2648 out: 2649 return rc; 2650 } 2651 EXPORT_SYMBOL_GPL(dev_hard_start_xmit); 2652 2653 static void qdisc_pkt_len_init(struct sk_buff *skb) 2654 { 2655 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2656 2657 qdisc_skb_cb(skb)->pkt_len = skb->len; 2658 2659 /* To get more precise estimation of bytes sent on wire, 2660 * we add to pkt_len the headers size of all segments 2661 */ 2662 if (shinfo->gso_size) { 2663 unsigned int hdr_len; 2664 u16 gso_segs = shinfo->gso_segs; 2665 2666 /* mac layer + network layer */ 2667 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 2668 2669 /* + transport layer */ 2670 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 2671 hdr_len += tcp_hdrlen(skb); 2672 else 2673 hdr_len += sizeof(struct udphdr); 2674 2675 if (shinfo->gso_type & SKB_GSO_DODGY) 2676 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 2677 shinfo->gso_size); 2678 2679 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 2680 } 2681 } 2682 2683 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2684 struct net_device *dev, 2685 struct netdev_queue *txq) 2686 { 2687 spinlock_t *root_lock = qdisc_lock(q); 2688 bool contended; 2689 int rc; 2690 2691 qdisc_pkt_len_init(skb); 2692 qdisc_calculate_pkt_len(skb, q); 2693 /* 2694 * Heuristic to force contended enqueues to serialize on a 2695 * separate lock before trying to get qdisc main lock. 2696 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2697 * and dequeue packets faster. 
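 *
 * Intended interleaving, sketched for three hypothetical CPUs: A owns
 * __QDISC_STATE_RUNNING and is dequeuing; B and C both want to
 * enqueue. B and C first serialize on q->busylock, so at most one of
 * them contends with A for the root lock at any moment, letting A
 * drain the qdisc with fewer lock bounces.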
2698 */ 2699 contended = qdisc_is_running(q); 2700 if (unlikely(contended)) 2701 spin_lock(&q->busylock); 2702 2703 spin_lock(root_lock); 2704 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2705 kfree_skb(skb); 2706 rc = NET_XMIT_DROP; 2707 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2708 qdisc_run_begin(q)) { 2709 /* 2710 * This is a work-conserving queue; there are no old skbs 2711 * waiting to be sent out; and the qdisc is not running - 2712 * xmit the skb directly. 2713 */ 2714 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2715 skb_dst_force(skb); 2716 2717 qdisc_bstats_update(q, skb); 2718 2719 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2720 if (unlikely(contended)) { 2721 spin_unlock(&q->busylock); 2722 contended = false; 2723 } 2724 __qdisc_run(q); 2725 } else 2726 qdisc_run_end(q); 2727 2728 rc = NET_XMIT_SUCCESS; 2729 } else { 2730 skb_dst_force(skb); 2731 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 2732 if (qdisc_run_begin(q)) { 2733 if (unlikely(contended)) { 2734 spin_unlock(&q->busylock); 2735 contended = false; 2736 } 2737 __qdisc_run(q); 2738 } 2739 } 2740 spin_unlock(root_lock); 2741 if (unlikely(contended)) 2742 spin_unlock(&q->busylock); 2743 return rc; 2744 } 2745 2746 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2747 static void skb_update_prio(struct sk_buff *skb) 2748 { 2749 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2750 2751 if (!skb->priority && skb->sk && map) { 2752 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; 2753 2754 if (prioidx < map->priomap_len) 2755 skb->priority = map->priomap[prioidx]; 2756 } 2757 } 2758 #else 2759 #define skb_update_prio(skb) 2760 #endif 2761 2762 static DEFINE_PER_CPU(int, xmit_recursion); 2763 #define RECURSION_LIMIT 10 2764 2765 /** 2766 * dev_loopback_xmit - loop back @skb 2767 * @skb: buffer to transmit 2768 */ 2769 int dev_loopback_xmit(struct sk_buff *skb) 2770 { 2771 skb_reset_mac_header(skb); 2772 __skb_pull(skb, skb_network_offset(skb)); 2773 skb->pkt_type = PACKET_LOOPBACK; 2774 skb->ip_summed = CHECKSUM_UNNECESSARY; 2775 WARN_ON(!skb_dst(skb)); 2776 skb_dst_force(skb); 2777 netif_rx_ni(skb); 2778 return 0; 2779 } 2780 EXPORT_SYMBOL(dev_loopback_xmit); 2781 2782 /** 2783 * __dev_queue_xmit - transmit a buffer 2784 * @skb: buffer to transmit 2785 * @accel_priv: private data used for L2 forwarding offload 2786 * 2787 * Queue a buffer for transmission to a network device. The caller must 2788 * have set the device and priority and built the buffer before calling 2789 * this function. The function can be called from an interrupt. 2790 * 2791 * A negative errno code is returned on a failure. A success does not 2792 * guarantee the frame will be transmitted as it may be dropped due 2793 * to congestion or traffic shaping. 2794 * 2795 * ----------------------------------------------------------------------------------- 2796 * I notice this method can also return errors from the queue disciplines, 2797 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2798 * be positive. 2799 * 2800 * Regardless of the return value, the skb is consumed, so it is currently 2801 * difficult to retry a send to this method. (You can bump the ref count 2802 * before sending to hold a reference for retry if you are careful.) 2803 * 2804 * When calling this method, interrupts MUST be enabled. This is because 2805 * the BH enable code must have IRQs enabled so that it will not deadlock. 
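 *
 * A minimal calling sketch (illustrative; build_dummy_frame() is a
 * hypothetical helper that allocates an skb, fills in the payload and
 * sets skb->dev):
 *
 *	struct sk_buff *skb = build_dummy_frame(dev);
 *	int rc = skb ? dev_queue_xmit(skb) : -ENOMEM;
 *
 * Whatever rc reports, the skb has been consumed and must not be
 * freed or retried by the caller, as noted above.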
2806 * --BLG 2807 */ 2808 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) 2809 { 2810 struct net_device *dev = skb->dev; 2811 struct netdev_queue *txq; 2812 struct Qdisc *q; 2813 int rc = -ENOMEM; 2814 2815 skb_reset_mac_header(skb); 2816 2817 /* Disable soft irqs for various locks below. Also 2818 * stops preemption for RCU. 2819 */ 2820 rcu_read_lock_bh(); 2821 2822 skb_update_prio(skb); 2823 2824 txq = netdev_pick_tx(dev, skb, accel_priv); 2825 q = rcu_dereference_bh(txq->qdisc); 2826 2827 #ifdef CONFIG_NET_CLS_ACT 2828 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2829 #endif 2830 trace_net_dev_queue(skb); 2831 if (q->enqueue) { 2832 rc = __dev_xmit_skb(skb, q, dev, txq); 2833 goto out; 2834 } 2835 2836 /* The device has no queue. Common case for software devices: 2837 loopback, all the sorts of tunnels... 2838 2839 Really, it is unlikely that netif_tx_lock protection is necessary 2840 here. (f.e. loopback and IP tunnels are clean ignoring statistics 2841 counters.) 2842 However, it is possible, that they rely on protection 2843 made by us here. 2844 2845 Check this and shot the lock. It is not prone from deadlocks. 2846 Either shot noqueue qdisc, it is even simpler 8) 2847 */ 2848 if (dev->flags & IFF_UP) { 2849 int cpu = smp_processor_id(); /* ok because BHs are off */ 2850 2851 if (txq->xmit_lock_owner != cpu) { 2852 2853 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) 2854 goto recursion_alert; 2855 2856 HARD_TX_LOCK(dev, txq, cpu); 2857 2858 if (!netif_xmit_stopped(txq)) { 2859 __this_cpu_inc(xmit_recursion); 2860 rc = dev_hard_start_xmit(skb, dev, txq); 2861 __this_cpu_dec(xmit_recursion); 2862 if (dev_xmit_complete(rc)) { 2863 HARD_TX_UNLOCK(dev, txq); 2864 goto out; 2865 } 2866 } 2867 HARD_TX_UNLOCK(dev, txq); 2868 net_crit_ratelimited("Virtual device %s asks to queue packet!\n", 2869 dev->name); 2870 } else { 2871 /* Recursion is detected! It is possible, 2872 * unfortunately 2873 */ 2874 recursion_alert: 2875 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 2876 dev->name); 2877 } 2878 } 2879 2880 rc = -ENETDOWN; 2881 rcu_read_unlock_bh(); 2882 2883 kfree_skb(skb); 2884 return rc; 2885 out: 2886 rcu_read_unlock_bh(); 2887 return rc; 2888 } 2889 2890 int dev_queue_xmit(struct sk_buff *skb) 2891 { 2892 return __dev_queue_xmit(skb, NULL); 2893 } 2894 EXPORT_SYMBOL(dev_queue_xmit); 2895 2896 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) 2897 { 2898 return __dev_queue_xmit(skb, accel_priv); 2899 } 2900 EXPORT_SYMBOL(dev_queue_xmit_accel); 2901 2902 2903 /*======================================================================= 2904 Receiver routines 2905 =======================================================================*/ 2906 2907 int netdev_max_backlog __read_mostly = 1000; 2908 EXPORT_SYMBOL(netdev_max_backlog); 2909 2910 int netdev_tstamp_prequeue __read_mostly = 1; 2911 int netdev_budget __read_mostly = 300; 2912 int weight_p __read_mostly = 64; /* old backlog weight */ 2913 2914 /* Called with irq disabled */ 2915 static inline void ____napi_schedule(struct softnet_data *sd, 2916 struct napi_struct *napi) 2917 { 2918 list_add_tail(&napi->poll_list, &sd->poll_list); 2919 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2920 } 2921 2922 #ifdef CONFIG_RPS 2923 2924 /* One global table that all flow-based protocols share. 
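 *
 * Background sketch: the socket layer updates this table at recvmsg
 * time (see sock_rps_record_flow() in net/sock.h), so ents[hash & mask]
 * approximates the CPU on which userspace last consumed each flow;
 * get_rps_cpu() below compares it against the per-rxqueue flow table
 * to decide whether to steer.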
*/ 2925 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2926 EXPORT_SYMBOL(rps_sock_flow_table); 2927 2928 struct static_key rps_needed __read_mostly; 2929 2930 static struct rps_dev_flow * 2931 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2932 struct rps_dev_flow *rflow, u16 next_cpu) 2933 { 2934 if (next_cpu != RPS_NO_CPU) { 2935 #ifdef CONFIG_RFS_ACCEL 2936 struct netdev_rx_queue *rxqueue; 2937 struct rps_dev_flow_table *flow_table; 2938 struct rps_dev_flow *old_rflow; 2939 u32 flow_id; 2940 u16 rxq_index; 2941 int rc; 2942 2943 /* Should we steer this flow to a different hardware queue? */ 2944 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 2945 !(dev->features & NETIF_F_NTUPLE)) 2946 goto out; 2947 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 2948 if (rxq_index == skb_get_rx_queue(skb)) 2949 goto out; 2950 2951 rxqueue = dev->_rx + rxq_index; 2952 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2953 if (!flow_table) 2954 goto out; 2955 flow_id = skb->rxhash & flow_table->mask; 2956 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 2957 rxq_index, flow_id); 2958 if (rc < 0) 2959 goto out; 2960 old_rflow = rflow; 2961 rflow = &flow_table->flows[flow_id]; 2962 rflow->filter = rc; 2963 if (old_rflow->filter == rflow->filter) 2964 old_rflow->filter = RPS_NO_FILTER; 2965 out: 2966 #endif 2967 rflow->last_qtail = 2968 per_cpu(softnet_data, next_cpu).input_queue_head; 2969 } 2970 2971 rflow->cpu = next_cpu; 2972 return rflow; 2973 } 2974 2975 /* 2976 * get_rps_cpu is called from netif_receive_skb and returns the target 2977 * CPU from the RPS map of the receiving queue for a given skb. 2978 * rcu_read_lock must be held on entry. 2979 */ 2980 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2981 struct rps_dev_flow **rflowp) 2982 { 2983 struct netdev_rx_queue *rxqueue; 2984 struct rps_map *map; 2985 struct rps_dev_flow_table *flow_table; 2986 struct rps_sock_flow_table *sock_flow_table; 2987 int cpu = -1; 2988 u16 tcpu; 2989 2990 if (skb_rx_queue_recorded(skb)) { 2991 u16 index = skb_get_rx_queue(skb); 2992 if (unlikely(index >= dev->real_num_rx_queues)) { 2993 WARN_ONCE(dev->real_num_rx_queues > 1, 2994 "%s received packet on queue %u, but number " 2995 "of RX queues is %u\n", 2996 dev->name, index, dev->real_num_rx_queues); 2997 goto done; 2998 } 2999 rxqueue = dev->_rx + index; 3000 } else 3001 rxqueue = dev->_rx; 3002 3003 map = rcu_dereference(rxqueue->rps_map); 3004 if (map) { 3005 if (map->len == 1 && 3006 !rcu_access_pointer(rxqueue->rps_flow_table)) { 3007 tcpu = map->cpus[0]; 3008 if (cpu_online(tcpu)) 3009 cpu = tcpu; 3010 goto done; 3011 } 3012 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { 3013 goto done; 3014 } 3015 3016 skb_reset_network_header(skb); 3017 if (!skb_get_hash(skb)) 3018 goto done; 3019 3020 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3021 sock_flow_table = rcu_dereference(rps_sock_flow_table); 3022 if (flow_table && sock_flow_table) { 3023 u16 next_cpu; 3024 struct rps_dev_flow *rflow; 3025 3026 rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; 3027 tcpu = rflow->cpu; 3028 3029 next_cpu = sock_flow_table->ents[skb->rxhash & 3030 sock_flow_table->mask]; 3031 3032 /* 3033 * If the desired CPU (where last recvmsg was done) is 3034 * different from current CPU (one in the rx-queue flow 3035 * table entry), switch if one of the following holds: 3036 * - Current CPU is unset (equal to RPS_NO_CPU). 3037 * - Current CPU is offline. 
3038 * - The current CPU's queue tail has advanced beyond the 3039 * last packet that was enqueued using this table entry. 3040 * This guarantees that all previous packets for the flow 3041 * have been dequeued, thus preserving in order delivery. 3042 */ 3043 if (unlikely(tcpu != next_cpu) && 3044 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 3045 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 3046 rflow->last_qtail)) >= 0)) { 3047 tcpu = next_cpu; 3048 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 3049 } 3050 3051 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 3052 *rflowp = rflow; 3053 cpu = tcpu; 3054 goto done; 3055 } 3056 } 3057 3058 if (map) { 3059 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 3060 3061 if (cpu_online(tcpu)) { 3062 cpu = tcpu; 3063 goto done; 3064 } 3065 } 3066 3067 done: 3068 return cpu; 3069 } 3070 3071 #ifdef CONFIG_RFS_ACCEL 3072 3073 /** 3074 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 3075 * @dev: Device on which the filter was set 3076 * @rxq_index: RX queue index 3077 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 3078 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 3079 * 3080 * Drivers that implement ndo_rx_flow_steer() should periodically call 3081 * this function for each installed filter and remove the filters for 3082 * which it returns %true. 3083 */ 3084 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 3085 u32 flow_id, u16 filter_id) 3086 { 3087 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 3088 struct rps_dev_flow_table *flow_table; 3089 struct rps_dev_flow *rflow; 3090 bool expire = true; 3091 int cpu; 3092 3093 rcu_read_lock(); 3094 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3095 if (flow_table && flow_id <= flow_table->mask) { 3096 rflow = &flow_table->flows[flow_id]; 3097 cpu = ACCESS_ONCE(rflow->cpu); 3098 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 3099 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 3100 rflow->last_qtail) < 3101 (int)(10 * flow_table->mask))) 3102 expire = false; 3103 } 3104 rcu_read_unlock(); 3105 return expire; 3106 } 3107 EXPORT_SYMBOL(rps_may_expire_flow); 3108 3109 #endif /* CONFIG_RFS_ACCEL */ 3110 3111 /* Called from hardirq (IPI) context */ 3112 static void rps_trigger_softirq(void *data) 3113 { 3114 struct softnet_data *sd = data; 3115 3116 ____napi_schedule(sd, &sd->backlog); 3117 sd->received_rps++; 3118 } 3119 3120 #endif /* CONFIG_RPS */ 3121 3122 /* 3123 * Check if this softnet_data structure is another cpu one 3124 * If yes, queue it to our IPI list and return 1 3125 * If no, return 0 3126 */ 3127 static int rps_ipi_queued(struct softnet_data *sd) 3128 { 3129 #ifdef CONFIG_RPS 3130 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 3131 3132 if (sd != mysd) { 3133 sd->rps_ipi_next = mysd->rps_ipi_list; 3134 mysd->rps_ipi_list = sd; 3135 3136 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3137 return 1; 3138 } 3139 #endif /* CONFIG_RPS */ 3140 return 0; 3141 } 3142 3143 #ifdef CONFIG_NET_FLOW_LIMIT 3144 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 3145 #endif 3146 3147 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 3148 { 3149 #ifdef CONFIG_NET_FLOW_LIMIT 3150 struct sd_flow_limit *fl; 3151 struct softnet_data *sd; 3152 unsigned int old_flow, new_flow; 3153 3154 if (qlen < (netdev_max_backlog >> 1)) 3155 return false; 3156 3157 sd = &__get_cpu_var(softnet_data); 3158 3159 rcu_read_lock(); 3160 fl = rcu_dereference(sd->flow_limit); 3161 if (fl) { 3162 new_flow = 
skb_get_hash(skb) & (fl->num_buckets - 1); 3163 old_flow = fl->history[fl->history_head]; 3164 fl->history[fl->history_head] = new_flow; 3165 3166 fl->history_head++; 3167 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 3168 3169 if (likely(fl->buckets[old_flow])) 3170 fl->buckets[old_flow]--; 3171 3172 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 3173 fl->count++; 3174 rcu_read_unlock(); 3175 return true; 3176 } 3177 } 3178 rcu_read_unlock(); 3179 #endif 3180 return false; 3181 } 3182 3183 /* 3184 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 3185 * queue (may be a remote CPU queue). 3186 */ 3187 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 3188 unsigned int *qtail) 3189 { 3190 struct softnet_data *sd; 3191 unsigned long flags; 3192 unsigned int qlen; 3193 3194 sd = &per_cpu(softnet_data, cpu); 3195 3196 local_irq_save(flags); 3197 3198 rps_lock(sd); 3199 qlen = skb_queue_len(&sd->input_pkt_queue); 3200 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 3201 if (skb_queue_len(&sd->input_pkt_queue)) { 3202 enqueue: 3203 __skb_queue_tail(&sd->input_pkt_queue, skb); 3204 input_queue_tail_incr_save(sd, qtail); 3205 rps_unlock(sd); 3206 local_irq_restore(flags); 3207 return NET_RX_SUCCESS; 3208 } 3209 3210 /* Schedule NAPI for backlog device 3211 * We can use non atomic operation since we own the queue lock 3212 */ 3213 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 3214 if (!rps_ipi_queued(sd)) 3215 ____napi_schedule(sd, &sd->backlog); 3216 } 3217 goto enqueue; 3218 } 3219 3220 sd->dropped++; 3221 rps_unlock(sd); 3222 3223 local_irq_restore(flags); 3224 3225 atomic_long_inc(&skb->dev->rx_dropped); 3226 kfree_skb(skb); 3227 return NET_RX_DROP; 3228 } 3229 3230 static int netif_rx_internal(struct sk_buff *skb) 3231 { 3232 int ret; 3233 3234 /* if netpoll wants it, pretend we never saw it */ 3235 if (netpoll_rx(skb)) 3236 return NET_RX_DROP; 3237 3238 net_timestamp_check(netdev_tstamp_prequeue, skb); 3239 3240 trace_netif_rx(skb); 3241 #ifdef CONFIG_RPS 3242 if (static_key_false(&rps_needed)) { 3243 struct rps_dev_flow voidflow, *rflow = &voidflow; 3244 int cpu; 3245 3246 preempt_disable(); 3247 rcu_read_lock(); 3248 3249 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3250 if (cpu < 0) 3251 cpu = smp_processor_id(); 3252 3253 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3254 3255 rcu_read_unlock(); 3256 preempt_enable(); 3257 } else 3258 #endif 3259 { 3260 unsigned int qtail; 3261 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 3262 put_cpu(); 3263 } 3264 return ret; 3265 } 3266 3267 /** 3268 * netif_rx - post buffer to the network code 3269 * @skb: buffer to post 3270 * 3271 * This function receives a packet from a device driver and queues it for 3272 * the upper (protocol) levels to process. It always succeeds. The buffer 3273 * may be dropped during processing for congestion control or by the 3274 * protocol layers. 
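 *
 * Typical usage from a non-NAPI driver's receive interrupt
 * (illustrative sketch; foo_build_skb() is a hypothetical helper that
 * copies the frame out of the hardware ring):
 *
 *	skb = foo_build_skb(priv, len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_rx(skb);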
3275 * 3276 * return values: 3277 * NET_RX_SUCCESS (no congestion) 3278 * NET_RX_DROP (packet was dropped) 3279 * 3280 */ 3281 3282 int netif_rx(struct sk_buff *skb) 3283 { 3284 trace_netif_rx_entry(skb); 3285 3286 return netif_rx_internal(skb); 3287 } 3288 EXPORT_SYMBOL(netif_rx); 3289 3290 int netif_rx_ni(struct sk_buff *skb) 3291 { 3292 int err; 3293 3294 trace_netif_rx_ni_entry(skb); 3295 3296 preempt_disable(); 3297 err = netif_rx_internal(skb); 3298 if (local_softirq_pending()) 3299 do_softirq(); 3300 preempt_enable(); 3301 3302 return err; 3303 } 3304 EXPORT_SYMBOL(netif_rx_ni); 3305 3306 static void net_tx_action(struct softirq_action *h) 3307 { 3308 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3309 3310 if (sd->completion_queue) { 3311 struct sk_buff *clist; 3312 3313 local_irq_disable(); 3314 clist = sd->completion_queue; 3315 sd->completion_queue = NULL; 3316 local_irq_enable(); 3317 3318 while (clist) { 3319 struct sk_buff *skb = clist; 3320 clist = clist->next; 3321 3322 WARN_ON(atomic_read(&skb->users)); 3323 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) 3324 trace_consume_skb(skb); 3325 else 3326 trace_kfree_skb(skb, net_tx_action); 3327 __kfree_skb(skb); 3328 } 3329 } 3330 3331 if (sd->output_queue) { 3332 struct Qdisc *head; 3333 3334 local_irq_disable(); 3335 head = sd->output_queue; 3336 sd->output_queue = NULL; 3337 sd->output_queue_tailp = &sd->output_queue; 3338 local_irq_enable(); 3339 3340 while (head) { 3341 struct Qdisc *q = head; 3342 spinlock_t *root_lock; 3343 3344 head = head->next_sched; 3345 3346 root_lock = qdisc_lock(q); 3347 if (spin_trylock(root_lock)) { 3348 smp_mb__before_clear_bit(); 3349 clear_bit(__QDISC_STATE_SCHED, 3350 &q->state); 3351 qdisc_run(q); 3352 spin_unlock(root_lock); 3353 } else { 3354 if (!test_bit(__QDISC_STATE_DEACTIVATED, 3355 &q->state)) { 3356 __netif_reschedule(q); 3357 } else { 3358 smp_mb__before_clear_bit(); 3359 clear_bit(__QDISC_STATE_SCHED, 3360 &q->state); 3361 } 3362 } 3363 } 3364 } 3365 } 3366 3367 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ 3368 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) 3369 /* This hook is defined here for ATM LANE */ 3370 int (*br_fdb_test_addr_hook)(struct net_device *dev, 3371 unsigned char *addr) __read_mostly; 3372 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 3373 #endif 3374 3375 #ifdef CONFIG_NET_CLS_ACT 3376 /* TODO: Maybe we should just force sch_ingress to be compiled in 3377 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 3378 * a compare and 2 stores extra right now if we dont have it on 3379 * but have CONFIG_NET_CLS_ACT 3380 * NOTE: This doesn't stop any functionality; if you dont have 3381 * the ingress scheduler, you just can't add policies on ingress. 
3382 * 3383 */ 3384 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 3385 { 3386 struct net_device *dev = skb->dev; 3387 u32 ttl = G_TC_RTTL(skb->tc_verd); 3388 int result = TC_ACT_OK; 3389 struct Qdisc *q; 3390 3391 if (unlikely(MAX_RED_LOOP < ttl++)) { 3392 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n", 3393 skb->skb_iif, dev->ifindex); 3394 return TC_ACT_SHOT; 3395 } 3396 3397 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 3398 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 3399 3400 q = rxq->qdisc; 3401 if (q != &noop_qdisc) { 3402 spin_lock(qdisc_lock(q)); 3403 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 3404 result = qdisc_enqueue_root(skb, q); 3405 spin_unlock(qdisc_lock(q)); 3406 } 3407 3408 return result; 3409 } 3410 3411 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 3412 struct packet_type **pt_prev, 3413 int *ret, struct net_device *orig_dev) 3414 { 3415 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3416 3417 if (!rxq || rxq->qdisc == &noop_qdisc) 3418 goto out; 3419 3420 if (*pt_prev) { 3421 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3422 *pt_prev = NULL; 3423 } 3424 3425 switch (ing_filter(skb, rxq)) { 3426 case TC_ACT_SHOT: 3427 case TC_ACT_STOLEN: 3428 kfree_skb(skb); 3429 return NULL; 3430 } 3431 3432 out: 3433 skb->tc_verd = 0; 3434 return skb; 3435 } 3436 #endif 3437 3438 /** 3439 * netdev_rx_handler_register - register receive handler 3440 * @dev: device to register a handler for 3441 * @rx_handler: receive handler to register 3442 * @rx_handler_data: data pointer that is used by rx handler 3443 * 3444 * Register a receive hander for a device. This handler will then be 3445 * called from __netif_receive_skb. A negative errno code is returned 3446 * on a failure. 3447 * 3448 * The caller must hold the rtnl_mutex. 3449 * 3450 * For a general description of rx_handler, see enum rx_handler_result. 3451 */ 3452 int netdev_rx_handler_register(struct net_device *dev, 3453 rx_handler_func_t *rx_handler, 3454 void *rx_handler_data) 3455 { 3456 ASSERT_RTNL(); 3457 3458 if (dev->rx_handler) 3459 return -EBUSY; 3460 3461 /* Note: rx_handler_data must be set before rx_handler */ 3462 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3463 rcu_assign_pointer(dev->rx_handler, rx_handler); 3464 3465 return 0; 3466 } 3467 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 3468 3469 /** 3470 * netdev_rx_handler_unregister - unregister receive handler 3471 * @dev: device to unregister a handler from 3472 * 3473 * Unregister a receive handler from a device. 3474 * 3475 * The caller must hold the rtnl_mutex. 3476 */ 3477 void netdev_rx_handler_unregister(struct net_device *dev) 3478 { 3479 3480 ASSERT_RTNL(); 3481 RCU_INIT_POINTER(dev->rx_handler, NULL); 3482 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 3483 * section has a guarantee to see a non NULL rx_handler_data 3484 * as well. 3485 */ 3486 synchronize_net(); 3487 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3488 } 3489 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3490 3491 /* 3492 * Limit the use of PFMEMALLOC reserves to those protocols that implement 3493 * the special handling of PFMEMALLOC skbs. 
3494 */ 3495 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 3496 { 3497 switch (skb->protocol) { 3498 case __constant_htons(ETH_P_ARP): 3499 case __constant_htons(ETH_P_IP): 3500 case __constant_htons(ETH_P_IPV6): 3501 case __constant_htons(ETH_P_8021Q): 3502 case __constant_htons(ETH_P_8021AD): 3503 return true; 3504 default: 3505 return false; 3506 } 3507 } 3508 3509 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) 3510 { 3511 struct packet_type *ptype, *pt_prev; 3512 rx_handler_func_t *rx_handler; 3513 struct net_device *orig_dev; 3514 struct net_device *null_or_dev; 3515 bool deliver_exact = false; 3516 int ret = NET_RX_DROP; 3517 __be16 type; 3518 3519 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3520 3521 trace_netif_receive_skb(skb); 3522 3523 /* if we've gotten here through NAPI, check netpoll */ 3524 if (netpoll_receive_skb(skb)) 3525 goto out; 3526 3527 orig_dev = skb->dev; 3528 3529 skb_reset_network_header(skb); 3530 if (!skb_transport_header_was_set(skb)) 3531 skb_reset_transport_header(skb); 3532 skb_reset_mac_len(skb); 3533 3534 pt_prev = NULL; 3535 3536 rcu_read_lock(); 3537 3538 another_round: 3539 skb->skb_iif = skb->dev->ifindex; 3540 3541 __this_cpu_inc(softnet_data.processed); 3542 3543 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 3544 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 3545 skb = vlan_untag(skb); 3546 if (unlikely(!skb)) 3547 goto unlock; 3548 } 3549 3550 #ifdef CONFIG_NET_CLS_ACT 3551 if (skb->tc_verd & TC_NCLS) { 3552 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3553 goto ncls; 3554 } 3555 #endif 3556 3557 if (pfmemalloc) 3558 goto skip_taps; 3559 3560 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3561 if (!ptype->dev || ptype->dev == skb->dev) { 3562 if (pt_prev) 3563 ret = deliver_skb(skb, pt_prev, orig_dev); 3564 pt_prev = ptype; 3565 } 3566 } 3567 3568 skip_taps: 3569 #ifdef CONFIG_NET_CLS_ACT 3570 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3571 if (!skb) 3572 goto unlock; 3573 ncls: 3574 #endif 3575 3576 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 3577 goto drop; 3578 3579 if (vlan_tx_tag_present(skb)) { 3580 if (pt_prev) { 3581 ret = deliver_skb(skb, pt_prev, orig_dev); 3582 pt_prev = NULL; 3583 } 3584 if (vlan_do_receive(&skb)) 3585 goto another_round; 3586 else if (unlikely(!skb)) 3587 goto unlock; 3588 } 3589 3590 rx_handler = rcu_dereference(skb->dev->rx_handler); 3591 if (rx_handler) { 3592 if (pt_prev) { 3593 ret = deliver_skb(skb, pt_prev, orig_dev); 3594 pt_prev = NULL; 3595 } 3596 switch (rx_handler(&skb)) { 3597 case RX_HANDLER_CONSUMED: 3598 ret = NET_RX_SUCCESS; 3599 goto unlock; 3600 case RX_HANDLER_ANOTHER: 3601 goto another_round; 3602 case RX_HANDLER_EXACT: 3603 deliver_exact = true; 3604 case RX_HANDLER_PASS: 3605 break; 3606 default: 3607 BUG(); 3608 } 3609 } 3610 3611 if (unlikely(vlan_tx_tag_present(skb))) { 3612 if (vlan_tx_tag_get_id(skb)) 3613 skb->pkt_type = PACKET_OTHERHOST; 3614 /* Note: we might in the future use prio bits 3615 * and set skb->priority like in vlan_do_receive() 3616 * For the time being, just ignore Priority Code Point 3617 */ 3618 skb->vlan_tci = 0; 3619 } 3620 3621 /* deliver only exact match when indicated */ 3622 null_or_dev = deliver_exact ? 
skb->dev : NULL; 3623 3624 type = skb->protocol; 3625 list_for_each_entry_rcu(ptype, 3626 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3627 if (ptype->type == type && 3628 (ptype->dev == null_or_dev || ptype->dev == skb->dev || 3629 ptype->dev == orig_dev)) { 3630 if (pt_prev) 3631 ret = deliver_skb(skb, pt_prev, orig_dev); 3632 pt_prev = ptype; 3633 } 3634 } 3635 3636 if (pt_prev) { 3637 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 3638 goto drop; 3639 else 3640 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3641 } else { 3642 drop: 3643 atomic_long_inc(&skb->dev->rx_dropped); 3644 kfree_skb(skb); 3645 /* Jamal, now you will not able to escape explaining 3646 * me how you were going to use this. :-) 3647 */ 3648 ret = NET_RX_DROP; 3649 } 3650 3651 unlock: 3652 rcu_read_unlock(); 3653 out: 3654 return ret; 3655 } 3656 3657 static int __netif_receive_skb(struct sk_buff *skb) 3658 { 3659 int ret; 3660 3661 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 3662 unsigned long pflags = current->flags; 3663 3664 /* 3665 * PFMEMALLOC skbs are special, they should 3666 * - be delivered to SOCK_MEMALLOC sockets only 3667 * - stay away from userspace 3668 * - have bounded memory usage 3669 * 3670 * Use PF_MEMALLOC as this saves us from propagating the allocation 3671 * context down to all allocation sites. 3672 */ 3673 current->flags |= PF_MEMALLOC; 3674 ret = __netif_receive_skb_core(skb, true); 3675 tsk_restore_flags(current, pflags, PF_MEMALLOC); 3676 } else 3677 ret = __netif_receive_skb_core(skb, false); 3678 3679 return ret; 3680 } 3681 3682 static int netif_receive_skb_internal(struct sk_buff *skb) 3683 { 3684 net_timestamp_check(netdev_tstamp_prequeue, skb); 3685 3686 if (skb_defer_rx_timestamp(skb)) 3687 return NET_RX_SUCCESS; 3688 3689 #ifdef CONFIG_RPS 3690 if (static_key_false(&rps_needed)) { 3691 struct rps_dev_flow voidflow, *rflow = &voidflow; 3692 int cpu, ret; 3693 3694 rcu_read_lock(); 3695 3696 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3697 3698 if (cpu >= 0) { 3699 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3700 rcu_read_unlock(); 3701 return ret; 3702 } 3703 rcu_read_unlock(); 3704 } 3705 #endif 3706 return __netif_receive_skb(skb); 3707 } 3708 3709 /** 3710 * netif_receive_skb - process receive buffer from network 3711 * @skb: buffer to process 3712 * 3713 * netif_receive_skb() is the main receive data processing function. 3714 * It always succeeds. The buffer may be dropped during processing 3715 * for congestion control or by the protocol layers. 3716 * 3717 * This function may only be called from softirq context and interrupts 3718 * should be enabled. 3719 * 3720 * Return values (usually ignored): 3721 * NET_RX_SUCCESS: no congestion 3722 * NET_RX_DROP: packet was dropped 3723 */ 3724 int netif_receive_skb(struct sk_buff *skb) 3725 { 3726 trace_netif_receive_skb_entry(skb); 3727 3728 return netif_receive_skb_internal(skb); 3729 } 3730 EXPORT_SYMBOL(netif_receive_skb); 3731 3732 /* Network device is going away, flush any packets still pending 3733 * Called with irqs disabled. 
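 *
 * (Invocation sketch: the unregister path runs this on every CPU,
 * via something like on_each_cpu(flush_backlog, dev, true), so each
 * CPU purges its own input_pkt_queue and process_queue of skbs that
 * still reference the dying device.)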
3734 */ 3735 static void flush_backlog(void *arg) 3736 { 3737 struct net_device *dev = arg; 3738 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3739 struct sk_buff *skb, *tmp; 3740 3741 rps_lock(sd); 3742 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3743 if (skb->dev == dev) { 3744 __skb_unlink(skb, &sd->input_pkt_queue); 3745 kfree_skb(skb); 3746 input_queue_head_incr(sd); 3747 } 3748 } 3749 rps_unlock(sd); 3750 3751 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3752 if (skb->dev == dev) { 3753 __skb_unlink(skb, &sd->process_queue); 3754 kfree_skb(skb); 3755 input_queue_head_incr(sd); 3756 } 3757 } 3758 } 3759 3760 static int napi_gro_complete(struct sk_buff *skb) 3761 { 3762 struct packet_offload *ptype; 3763 __be16 type = skb->protocol; 3764 struct list_head *head = &offload_base; 3765 int err = -ENOENT; 3766 3767 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 3768 3769 if (NAPI_GRO_CB(skb)->count == 1) { 3770 skb_shinfo(skb)->gso_size = 0; 3771 goto out; 3772 } 3773 3774 rcu_read_lock(); 3775 list_for_each_entry_rcu(ptype, head, list) { 3776 if (ptype->type != type || !ptype->callbacks.gro_complete) 3777 continue; 3778 3779 err = ptype->callbacks.gro_complete(skb, 0); 3780 break; 3781 } 3782 rcu_read_unlock(); 3783 3784 if (err) { 3785 WARN_ON(&ptype->list == head); 3786 kfree_skb(skb); 3787 return NET_RX_SUCCESS; 3788 } 3789 3790 out: 3791 return netif_receive_skb_internal(skb); 3792 } 3793 3794 /* napi->gro_list contains packets ordered by age. 3795 * youngest packets at the head of it. 3796 * Complete skbs in reverse order to reduce latencies. 3797 */ 3798 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 3799 { 3800 struct sk_buff *skb, *prev = NULL; 3801 3802 /* scan list and build reverse chain */ 3803 for (skb = napi->gro_list; skb != NULL; skb = skb->next) { 3804 skb->prev = prev; 3805 prev = skb; 3806 } 3807 3808 for (skb = prev; skb; skb = prev) { 3809 skb->next = NULL; 3810 3811 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 3812 return; 3813 3814 prev = skb->prev; 3815 napi_gro_complete(skb); 3816 napi->gro_count--; 3817 } 3818 3819 napi->gro_list = NULL; 3820 } 3821 EXPORT_SYMBOL(napi_gro_flush); 3822 3823 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) 3824 { 3825 struct sk_buff *p; 3826 unsigned int maclen = skb->dev->hard_header_len; 3827 u32 hash = skb_get_hash_raw(skb); 3828 3829 for (p = napi->gro_list; p; p = p->next) { 3830 unsigned long diffs; 3831 3832 NAPI_GRO_CB(p)->flush = 0; 3833 3834 if (hash != skb_get_hash_raw(p)) { 3835 NAPI_GRO_CB(p)->same_flow = 0; 3836 continue; 3837 } 3838 3839 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3840 diffs |= p->vlan_tci ^ skb->vlan_tci; 3841 if (maclen == ETH_HLEN) 3842 diffs |= compare_ether_header(skb_mac_header(p), 3843 skb_gro_mac_header(skb)); 3844 else if (!diffs) 3845 diffs = memcmp(skb_mac_header(p), 3846 skb_gro_mac_header(skb), 3847 maclen); 3848 NAPI_GRO_CB(p)->same_flow = !diffs; 3849 } 3850 } 3851 3852 static void skb_gro_reset_offset(struct sk_buff *skb) 3853 { 3854 const struct skb_shared_info *pinfo = skb_shinfo(skb); 3855 const skb_frag_t *frag0 = &pinfo->frags[0]; 3856 3857 NAPI_GRO_CB(skb)->data_offset = 0; 3858 NAPI_GRO_CB(skb)->frag0 = NULL; 3859 NAPI_GRO_CB(skb)->frag0_len = 0; 3860 3861 if (skb_mac_header(skb) == skb_tail_pointer(skb) && 3862 pinfo->nr_frags && 3863 !PageHighMem(skb_frag_page(frag0))) { 3864 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 3865 NAPI_GRO_CB(skb)->frag0_len = 
skb_frag_size(frag0); 3866 } 3867 } 3868 3869 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3870 { 3871 struct sk_buff **pp = NULL; 3872 struct packet_offload *ptype; 3873 __be16 type = skb->protocol; 3874 struct list_head *head = &offload_base; 3875 int same_flow; 3876 enum gro_result ret; 3877 3878 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3879 goto normal; 3880 3881 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3882 goto normal; 3883 3884 skb_gro_reset_offset(skb); 3885 gro_list_prepare(napi, skb); 3886 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */ 3887 3888 rcu_read_lock(); 3889 list_for_each_entry_rcu(ptype, head, list) { 3890 if (ptype->type != type || !ptype->callbacks.gro_receive) 3891 continue; 3892 3893 skb_set_network_header(skb, skb_gro_offset(skb)); 3894 skb_reset_mac_len(skb); 3895 NAPI_GRO_CB(skb)->same_flow = 0; 3896 NAPI_GRO_CB(skb)->flush = 0; 3897 NAPI_GRO_CB(skb)->free = 0; 3898 NAPI_GRO_CB(skb)->udp_mark = 0; 3899 3900 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); 3901 break; 3902 } 3903 rcu_read_unlock(); 3904 3905 if (&ptype->list == head) 3906 goto normal; 3907 3908 same_flow = NAPI_GRO_CB(skb)->same_flow; 3909 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; 3910 3911 if (pp) { 3912 struct sk_buff *nskb = *pp; 3913 3914 *pp = nskb->next; 3915 nskb->next = NULL; 3916 napi_gro_complete(nskb); 3917 napi->gro_count--; 3918 } 3919 3920 if (same_flow) 3921 goto ok; 3922 3923 if (NAPI_GRO_CB(skb)->flush) 3924 goto normal; 3925 3926 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { 3927 struct sk_buff *nskb = napi->gro_list; 3928 3929 /* locate the end of the list to select the 'oldest' flow */ 3930 while (nskb->next) { 3931 pp = &nskb->next; 3932 nskb = *pp; 3933 } 3934 *pp = NULL; 3935 nskb->next = NULL; 3936 napi_gro_complete(nskb); 3937 } else { 3938 napi->gro_count++; 3939 } 3940 NAPI_GRO_CB(skb)->count = 1; 3941 NAPI_GRO_CB(skb)->age = jiffies; 3942 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3943 skb->next = napi->gro_list; 3944 napi->gro_list = skb; 3945 ret = GRO_HELD; 3946 3947 pull: 3948 if (skb_headlen(skb) < skb_gro_offset(skb)) { 3949 int grow = skb_gro_offset(skb) - skb_headlen(skb); 3950 3951 BUG_ON(skb->end - skb->tail < grow); 3952 3953 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3954 3955 skb->tail += grow; 3956 skb->data_len -= grow; 3957 3958 skb_shinfo(skb)->frags[0].page_offset += grow; 3959 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); 3960 3961 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { 3962 skb_frag_unref(skb, 0); 3963 memmove(skb_shinfo(skb)->frags, 3964 skb_shinfo(skb)->frags + 1, 3965 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); 3966 } 3967 } 3968 3969 ok: 3970 return ret; 3971 3972 normal: 3973 ret = GRO_NORMAL; 3974 goto pull; 3975 } 3976 3977 struct packet_offload *gro_find_receive_by_type(__be16 type) 3978 { 3979 struct list_head *offload_head = &offload_base; 3980 struct packet_offload *ptype; 3981 3982 list_for_each_entry_rcu(ptype, offload_head, list) { 3983 if (ptype->type != type || !ptype->callbacks.gro_receive) 3984 continue; 3985 return ptype; 3986 } 3987 return NULL; 3988 } 3989 EXPORT_SYMBOL(gro_find_receive_by_type); 3990 3991 struct packet_offload *gro_find_complete_by_type(__be16 type) 3992 { 3993 struct list_head *offload_head = &offload_base; 3994 struct packet_offload *ptype; 3995 3996 list_for_each_entry_rcu(ptype, offload_head, list) { 3997 if (ptype->type != type || 
!ptype->callbacks.gro_complete) 3998 continue; 3999 return ptype; 4000 } 4001 return NULL; 4002 } 4003 EXPORT_SYMBOL(gro_find_complete_by_type); 4004 4005 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 4006 { 4007 switch (ret) { 4008 case GRO_NORMAL: 4009 if (netif_receive_skb_internal(skb)) 4010 ret = GRO_DROP; 4011 break; 4012 4013 case GRO_DROP: 4014 kfree_skb(skb); 4015 break; 4016 4017 case GRO_MERGED_FREE: 4018 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 4019 kmem_cache_free(skbuff_head_cache, skb); 4020 else 4021 __kfree_skb(skb); 4022 break; 4023 4024 case GRO_HELD: 4025 case GRO_MERGED: 4026 break; 4027 } 4028 4029 return ret; 4030 } 4031 4032 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 4033 { 4034 trace_napi_gro_receive_entry(skb); 4035 4036 return napi_skb_finish(dev_gro_receive(napi, skb), skb); 4037 } 4038 EXPORT_SYMBOL(napi_gro_receive); 4039 4040 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 4041 { 4042 __skb_pull(skb, skb_headlen(skb)); 4043 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 4044 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 4045 skb->vlan_tci = 0; 4046 skb->dev = napi->dev; 4047 skb->skb_iif = 0; 4048 4049 napi->skb = skb; 4050 } 4051 4052 struct sk_buff *napi_get_frags(struct napi_struct *napi) 4053 { 4054 struct sk_buff *skb = napi->skb; 4055 4056 if (!skb) { 4057 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 4058 napi->skb = skb; 4059 } 4060 return skb; 4061 } 4062 EXPORT_SYMBOL(napi_get_frags); 4063 4064 static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 4065 gro_result_t ret) 4066 { 4067 switch (ret) { 4068 case GRO_NORMAL: 4069 if (netif_receive_skb_internal(skb)) 4070 ret = GRO_DROP; 4071 break; 4072 4073 case GRO_DROP: 4074 case GRO_MERGED_FREE: 4075 napi_reuse_skb(napi, skb); 4076 break; 4077 4078 case GRO_HELD: 4079 case GRO_MERGED: 4080 break; 4081 } 4082 4083 return ret; 4084 } 4085 4086 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 4087 { 4088 struct sk_buff *skb = napi->skb; 4089 4090 napi->skb = NULL; 4091 4092 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) { 4093 napi_reuse_skb(napi, skb); 4094 return NULL; 4095 } 4096 skb->protocol = eth_type_trans(skb, skb->dev); 4097 4098 return skb; 4099 } 4100 4101 gro_result_t napi_gro_frags(struct napi_struct *napi) 4102 { 4103 struct sk_buff *skb = napi_frags_skb(napi); 4104 4105 if (!skb) 4106 return GRO_DROP; 4107 4108 trace_napi_gro_frags_entry(skb); 4109 4110 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 4111 } 4112 EXPORT_SYMBOL(napi_gro_frags); 4113 4114 /* 4115 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 4116 * Note: called with local irq disabled, but exits with local irq enabled. 4117 */ 4118 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 4119 { 4120 #ifdef CONFIG_RPS 4121 struct softnet_data *remsd = sd->rps_ipi_list; 4122 4123 if (remsd) { 4124 sd->rps_ipi_list = NULL; 4125 4126 local_irq_enable(); 4127 4128 /* Send pending IPI's to kick RPS processing on remote cpus. 
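 * Each remote softnet_data is linked through its rps_ipi_next
 * pointer, so this just walks the chain and fires one IPI per
 * online remote cpu.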
*/ 4129 while (remsd) { 4130 struct softnet_data *next = remsd->rps_ipi_next; 4131 4132 if (cpu_online(remsd->cpu)) 4133 __smp_call_function_single(remsd->cpu, 4134 &remsd->csd, 0); 4135 remsd = next; 4136 } 4137 } else 4138 #endif 4139 local_irq_enable(); 4140 } 4141 4142 static int process_backlog(struct napi_struct *napi, int quota) 4143 { 4144 int work = 0; 4145 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 4146 4147 #ifdef CONFIG_RPS 4148 /* Check if we have pending IPIs; it's better to send them now 4149 * than to wait for net_rx_action() to end. 4150 */ 4151 if (sd->rps_ipi_list) { 4152 local_irq_disable(); 4153 net_rps_action_and_irq_enable(sd); 4154 } 4155 #endif 4156 napi->weight = weight_p; 4157 local_irq_disable(); 4158 while (work < quota) { 4159 struct sk_buff *skb; 4160 unsigned int qlen; 4161 4162 while ((skb = __skb_dequeue(&sd->process_queue))) { 4163 local_irq_enable(); 4164 __netif_receive_skb(skb); 4165 local_irq_disable(); 4166 input_queue_head_incr(sd); 4167 if (++work >= quota) { 4168 local_irq_enable(); 4169 return work; 4170 } 4171 } 4172 4173 rps_lock(sd); 4174 qlen = skb_queue_len(&sd->input_pkt_queue); 4175 if (qlen) 4176 skb_queue_splice_tail_init(&sd->input_pkt_queue, 4177 &sd->process_queue); 4178 4179 if (qlen < quota - work) { 4180 /* 4181 * Inline a custom version of __napi_complete(). 4182 * Only the current cpu owns and manipulates this napi, 4183 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 4184 * We can use a plain write instead of clear_bit(), 4185 * and we don't need an smp_mb() memory barrier. 4186 */ 4187 list_del(&napi->poll_list); 4188 napi->state = 0; 4189 4190 quota = work + qlen; 4191 } 4192 rps_unlock(sd); 4193 } 4194 local_irq_enable(); 4195 4196 return work; 4197 } 4198 4199 /** 4200 * __napi_schedule - schedule for receive 4201 * @n: entry to schedule 4202 * 4203 * The entry's receive function will be scheduled to run. 4204 */ 4205 void __napi_schedule(struct napi_struct *n) 4206 { 4207 unsigned long flags; 4208 4209 local_irq_save(flags); 4210 ____napi_schedule(&__get_cpu_var(softnet_data), n); 4211 local_irq_restore(flags); 4212 } 4213 EXPORT_SYMBOL(__napi_schedule); 4214 4215 void __napi_complete(struct napi_struct *n) 4216 { 4217 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 4218 BUG_ON(n->gro_list); 4219 4220 list_del(&n->poll_list); 4221 smp_mb__before_clear_bit(); 4222 clear_bit(NAPI_STATE_SCHED, &n->state); 4223 } 4224 EXPORT_SYMBOL(__napi_complete); 4225 4226 void napi_complete(struct napi_struct *n) 4227 { 4228 unsigned long flags; 4229 4230 /* 4231 * don't let napi dequeue from the cpu poll list 4232 * just in case it's running on a different cpu 4233 */ 4234 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 4235 return; 4236 4237 napi_gro_flush(n, false); 4238 local_irq_save(flags); 4239 __napi_complete(n); 4240 local_irq_restore(flags); 4241 } 4242 EXPORT_SYMBOL(napi_complete); 4243 4244 /* must be called under rcu_read_lock(), as we don't take a reference */ 4245 struct napi_struct *napi_by_id(unsigned int napi_id) 4246 { 4247 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 4248 struct napi_struct *napi; 4249 4250 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 4251 if (napi->napi_id == napi_id) 4252 return napi; 4253 4254 return NULL; 4255 } 4256 EXPORT_SYMBOL_GPL(napi_by_id); 4257 4258 void napi_hash_add(struct napi_struct *napi) 4259 { 4260 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) { 4261 4262 spin_lock(&napi_hash_lock); 4263 4264 /* 0 is not a valid
id, and we also skip an id that is already taken; 4265 * we expect both events to be extremely rare 4266 */ 4267 napi->napi_id = 0; 4268 while (!napi->napi_id) { 4269 napi->napi_id = ++napi_gen_id; 4270 if (napi_by_id(napi->napi_id)) 4271 napi->napi_id = 0; 4272 } 4273 4274 hlist_add_head_rcu(&napi->napi_hash_node, 4275 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 4276 4277 spin_unlock(&napi_hash_lock); 4278 } 4279 } 4280 EXPORT_SYMBOL_GPL(napi_hash_add); 4281 4282 /* Warning: the caller is responsible for making sure an rcu grace period 4283 * is respected before freeing the memory containing @napi 4284 */ 4285 void napi_hash_del(struct napi_struct *napi) 4286 { 4287 spin_lock(&napi_hash_lock); 4288 4289 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) 4290 hlist_del_rcu(&napi->napi_hash_node); 4291 4292 spin_unlock(&napi_hash_lock); 4293 } 4294 EXPORT_SYMBOL_GPL(napi_hash_del); 4295 4296 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 4297 int (*poll)(struct napi_struct *, int), int weight) 4298 { 4299 INIT_LIST_HEAD(&napi->poll_list); 4300 napi->gro_count = 0; 4301 napi->gro_list = NULL; 4302 napi->skb = NULL; 4303 napi->poll = poll; 4304 if (weight > NAPI_POLL_WEIGHT) 4305 pr_err_once("netif_napi_add() called with weight %d on device %s\n", 4306 weight, dev->name); 4307 napi->weight = weight; 4308 list_add(&napi->dev_list, &dev->napi_list); 4309 napi->dev = dev; 4310 #ifdef CONFIG_NETPOLL 4311 spin_lock_init(&napi->poll_lock); 4312 napi->poll_owner = -1; 4313 #endif 4314 set_bit(NAPI_STATE_SCHED, &napi->state); 4315 } 4316 EXPORT_SYMBOL(netif_napi_add); 4317 4318 void netif_napi_del(struct napi_struct *napi) 4319 { 4320 list_del_init(&napi->dev_list); 4321 napi_free_frags(napi); 4322 4323 kfree_skb_list(napi->gro_list); 4324 napi->gro_list = NULL; 4325 napi->gro_count = 0; 4326 } 4327 EXPORT_SYMBOL(netif_napi_del); 4328 4329 static void net_rx_action(struct softirq_action *h) 4330 { 4331 struct softnet_data *sd = &__get_cpu_var(softnet_data); 4332 unsigned long time_limit = jiffies + 2; 4333 int budget = netdev_budget; 4334 void *have; 4335 4336 local_irq_disable(); 4337 4338 while (!list_empty(&sd->poll_list)) { 4339 struct napi_struct *n; 4340 int work, weight; 4341 4342 /* If the softirq window is exhausted then punt. 4343 * Allow this to run for 2 jiffies, which allows 4344 * an average latency of 1.5/HZ. 4345 */ 4346 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) 4347 goto softnet_break; 4348 4349 local_irq_enable(); 4350 4351 /* Even though interrupts have been re-enabled, this 4352 * access is safe because interrupts can only add new 4353 * entries to the tail of this list, and only ->poll() 4354 * calls can remove this head entry from the list. 4355 */ 4356 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); 4357 4358 have = netpoll_poll_lock(n); 4359 4360 weight = n->weight; 4361 4362 /* This NAPI_STATE_SCHED test is for avoiding a race 4363 * with netpoll's poll_napi(). Only the entity which 4364 * obtains the lock and sees NAPI_STATE_SCHED set will 4365 * actually make the ->poll() call. Therefore we avoid 4366 * accidentally calling ->poll() when NAPI is not scheduled. 4367 */ 4368 work = 0; 4369 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 4370 work = n->poll(n, weight); 4371 trace_napi_poll(n); 4372 } 4373 4374 WARN_ON_ONCE(work > weight); 4375 4376 budget -= work; 4377 4378 local_irq_disable(); 4379 4380 /* Drivers must not modify the NAPI state if they 4381 * consume the entire weight.
In such cases this code 4382 * still "owns" the NAPI instance and therefore can 4383 * move the instance around on the list at-will. 4384 */ 4385 if (unlikely(work == weight)) { 4386 if (unlikely(napi_disable_pending(n))) { 4387 local_irq_enable(); 4388 napi_complete(n); 4389 local_irq_disable(); 4390 } else { 4391 if (n->gro_list) { 4392 /* flush too old packets 4393 * If HZ < 1000, flush all packets. 4394 */ 4395 local_irq_enable(); 4396 napi_gro_flush(n, HZ >= 1000); 4397 local_irq_disable(); 4398 } 4399 list_move_tail(&n->poll_list, &sd->poll_list); 4400 } 4401 } 4402 4403 netpoll_poll_unlock(have); 4404 } 4405 out: 4406 net_rps_action_and_irq_enable(sd); 4407 4408 #ifdef CONFIG_NET_DMA 4409 /* 4410 * There may not be any more sk_buffs coming right now, so push 4411 * any pending DMA copies to hardware 4412 */ 4413 dma_issue_pending_all(); 4414 #endif 4415 4416 return; 4417 4418 softnet_break: 4419 sd->time_squeeze++; 4420 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4421 goto out; 4422 } 4423 4424 struct netdev_adjacent { 4425 struct net_device *dev; 4426 4427 /* upper master flag, there can only be one master device per list */ 4428 bool master; 4429 4430 /* counter for the number of times this device was added to us */ 4431 u16 ref_nr; 4432 4433 /* private field for the users */ 4434 void *private; 4435 4436 struct list_head list; 4437 struct rcu_head rcu; 4438 }; 4439 4440 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev, 4441 struct net_device *adj_dev, 4442 struct list_head *adj_list) 4443 { 4444 struct netdev_adjacent *adj; 4445 4446 list_for_each_entry(adj, adj_list, list) { 4447 if (adj->dev == adj_dev) 4448 return adj; 4449 } 4450 return NULL; 4451 } 4452 4453 /** 4454 * netdev_has_upper_dev - Check if device is linked to an upper device 4455 * @dev: device 4456 * @upper_dev: upper device to check 4457 * 4458 * Find out if a device is linked to specified upper device and return true 4459 * in case it is. Note that this checks only immediate upper device, 4460 * not through a complete stack of devices. The caller must hold the RTNL lock. 4461 */ 4462 bool netdev_has_upper_dev(struct net_device *dev, 4463 struct net_device *upper_dev) 4464 { 4465 ASSERT_RTNL(); 4466 4467 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper); 4468 } 4469 EXPORT_SYMBOL(netdev_has_upper_dev); 4470 4471 /** 4472 * netdev_has_any_upper_dev - Check if device is linked to some device 4473 * @dev: device 4474 * 4475 * Find out if a device is linked to an upper device and return true in case 4476 * it is. The caller must hold the RTNL lock. 4477 */ 4478 static bool netdev_has_any_upper_dev(struct net_device *dev) 4479 { 4480 ASSERT_RTNL(); 4481 4482 return !list_empty(&dev->all_adj_list.upper); 4483 } 4484 4485 /** 4486 * netdev_master_upper_dev_get - Get master upper device 4487 * @dev: device 4488 * 4489 * Find a master upper device and return pointer to it or NULL in case 4490 * it's not there. The caller must hold the RTNL lock. 
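 *
 * Minimal usage sketch (hypothetical caller; port_dev is a placeholder
 * and the RTNL lock is assumed to be held, e.g. inside a notifier):
 *
 *	struct net_device *master = netdev_master_upper_dev_get(port_dev);
 *
 *	if (master)
 *		netdev_info(port_dev, "enslaved to %s\n", master->name);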
4491 */ 4492 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 4493 { 4494 struct netdev_adjacent *upper; 4495 4496 ASSERT_RTNL(); 4497 4498 if (list_empty(&dev->adj_list.upper)) 4499 return NULL; 4500 4501 upper = list_first_entry(&dev->adj_list.upper, 4502 struct netdev_adjacent, list); 4503 if (likely(upper->master)) 4504 return upper->dev; 4505 return NULL; 4506 } 4507 EXPORT_SYMBOL(netdev_master_upper_dev_get); 4508 4509 void *netdev_adjacent_get_private(struct list_head *adj_list) 4510 { 4511 struct netdev_adjacent *adj; 4512 4513 adj = list_entry(adj_list, struct netdev_adjacent, list); 4514 4515 return adj->private; 4516 } 4517 EXPORT_SYMBOL(netdev_adjacent_get_private); 4518 4519 /** 4520 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list 4521 * @dev: device 4522 * @iter: list_head ** of the current position 4523 * 4524 * Gets the next device from the dev's upper list, starting from iter 4525 * position. The caller must hold RCU read lock. 4526 */ 4527 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, 4528 struct list_head **iter) 4529 { 4530 struct netdev_adjacent *upper; 4531 4532 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 4533 4534 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4535 4536 if (&upper->list == &dev->all_adj_list.upper) 4537 return NULL; 4538 4539 *iter = &upper->list; 4540 4541 return upper->dev; 4542 } 4543 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu); 4544 4545 /** 4546 * netdev_lower_get_next_private - Get the next ->private from the 4547 * lower neighbour list 4548 * @dev: device 4549 * @iter: list_head ** of the current position 4550 * 4551 * Gets the next netdev_adjacent->private from the dev's lower neighbour 4552 * list, starting from iter position. The caller must either hold the 4553 * RTNL lock or its own locking that guarantees that the neighbour lower 4554 * list will remain unchanged. 4555 */ 4556 void *netdev_lower_get_next_private(struct net_device *dev, 4557 struct list_head **iter) 4558 { 4559 struct netdev_adjacent *lower; 4560 4561 lower = list_entry(*iter, struct netdev_adjacent, list); 4562 4563 if (&lower->list == &dev->adj_list.lower) 4564 return NULL; 4565 4566 if (iter) 4567 *iter = lower->list.next; 4568 4569 return lower->private; 4570 } 4571 EXPORT_SYMBOL(netdev_lower_get_next_private); 4572 4573 /** 4574 * netdev_lower_get_next_private_rcu - Get the next ->private from the 4575 * lower neighbour list, RCU 4576 * variant 4577 * @dev: device 4578 * @iter: list_head ** of the current position 4579 * 4580 * Gets the next netdev_adjacent->private from the dev's lower neighbour 4581 * list, starting from iter position. The caller must hold RCU read lock. 4582 */ 4583 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4584 struct list_head **iter) 4585 { 4586 struct netdev_adjacent *lower; 4587 4588 WARN_ON_ONCE(!rcu_read_lock_held()); 4589 4590 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4591 4592 if (&lower->list == &dev->adj_list.lower) 4593 return NULL; 4594 4595 if (iter) 4596 *iter = &lower->list; 4597 4598 return lower->private; 4599 } 4600 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 4601 4602 /** 4603 * netdev_lower_get_first_private_rcu - Get the first ->private from the 4604 * lower neighbour list, RCU 4605 * variant 4606 * @dev: device 4607 * 4608 * Gets the first netdev_adjacent->private from the dev's lower neighbour 4609 * list. The caller must hold RCU read lock.
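 *
 * Illustrative sketch (hypothetical caller; use_priv() is a placeholder
 * for whatever the upper driver does with the private data it stored):
 *
 *	rcu_read_lock();
 *	priv = netdev_lower_get_first_private_rcu(upper_dev);
 *	if (priv)
 *		use_priv(priv);
 *	rcu_read_unlock();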
4610 */ 4611 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 4612 { 4613 struct netdev_adjacent *lower; 4614 4615 lower = list_first_or_null_rcu(&dev->adj_list.lower, 4616 struct netdev_adjacent, list); 4617 if (lower) 4618 return lower->private; 4619 return NULL; 4620 } 4621 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 4622 4623 /** 4624 * netdev_master_upper_dev_get_rcu - Get master upper device 4625 * @dev: device 4626 * 4627 * Find a master upper device and return pointer to it or NULL in case 4628 * it's not there. The caller must hold the RCU read lock. 4629 */ 4630 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 4631 { 4632 struct netdev_adjacent *upper; 4633 4634 upper = list_first_or_null_rcu(&dev->adj_list.upper, 4635 struct netdev_adjacent, list); 4636 if (upper && likely(upper->master)) 4637 return upper->dev; 4638 return NULL; 4639 } 4640 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 4641 4642 static int netdev_adjacent_sysfs_add(struct net_device *dev, 4643 struct net_device *adj_dev, 4644 struct list_head *dev_list) 4645 { 4646 char linkname[IFNAMSIZ+7]; 4647 sprintf(linkname, dev_list == &dev->adj_list.upper ? 4648 "upper_%s" : "lower_%s", adj_dev->name); 4649 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 4650 linkname); 4651 } 4652 static void netdev_adjacent_sysfs_del(struct net_device *dev, 4653 char *name, 4654 struct list_head *dev_list) 4655 { 4656 char linkname[IFNAMSIZ+7]; 4657 sprintf(linkname, dev_list == &dev->adj_list.upper ? 4658 "upper_%s" : "lower_%s", name); 4659 sysfs_remove_link(&(dev->dev.kobj), linkname); 4660 } 4661 4662 #define netdev_adjacent_is_neigh_list(dev, dev_list) \ 4663 (dev_list == &dev->adj_list.upper || \ 4664 dev_list == &dev->adj_list.lower) 4665 4666 static int __netdev_adjacent_dev_insert(struct net_device *dev, 4667 struct net_device *adj_dev, 4668 struct list_head *dev_list, 4669 void *private, bool master) 4670 { 4671 struct netdev_adjacent *adj; 4672 int ret; 4673 4674 adj = __netdev_find_adj(dev, adj_dev, dev_list); 4675 4676 if (adj) { 4677 adj->ref_nr++; 4678 return 0; 4679 } 4680 4681 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 4682 if (!adj) 4683 return -ENOMEM; 4684 4685 adj->dev = adj_dev; 4686 adj->master = master; 4687 adj->ref_nr = 1; 4688 adj->private = private; 4689 dev_hold(adj_dev); 4690 4691 pr_debug("dev_hold for %s, because of link added from %s to %s\n", 4692 adj_dev->name, dev->name, adj_dev->name); 4693 4694 if (netdev_adjacent_is_neigh_list(dev, dev_list)) { 4695 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 4696 if (ret) 4697 goto free_adj; 4698 } 4699 4700 /* Ensure that master link is always the first item in list. 
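 * (netdev_master_upper_dev_get() relies on this: it only examines the
 * first entry, via list_first_entry().)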
*/ 4701 if (master) { 4702 ret = sysfs_create_link(&(dev->dev.kobj), 4703 &(adj_dev->dev.kobj), "master"); 4704 if (ret) 4705 goto remove_symlinks; 4706 4707 list_add_rcu(&adj->list, dev_list); 4708 } else { 4709 list_add_tail_rcu(&adj->list, dev_list); 4710 } 4711 4712 return 0; 4713 4714 remove_symlinks: 4715 if (netdev_adjacent_is_neigh_list(dev, dev_list)) 4716 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 4717 free_adj: 4718 kfree(adj); 4719 dev_put(adj_dev); 4720 4721 return ret; 4722 } 4723 4724 static void __netdev_adjacent_dev_remove(struct net_device *dev, 4725 struct net_device *adj_dev, 4726 struct list_head *dev_list) 4727 { 4728 struct netdev_adjacent *adj; 4729 4730 adj = __netdev_find_adj(dev, adj_dev, dev_list); 4731 4732 if (!adj) { 4733 pr_err("tried to remove device %s from %s\n", 4734 dev->name, adj_dev->name); 4735 BUG(); 4736 } 4737 4738 if (adj->ref_nr > 1) { 4739 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name, 4740 adj->ref_nr-1); 4741 adj->ref_nr--; 4742 return; 4743 } 4744 4745 if (adj->master) 4746 sysfs_remove_link(&(dev->dev.kobj), "master"); 4747 4748 if (netdev_adjacent_is_neigh_list(dev, dev_list)) 4749 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 4750 4751 list_del_rcu(&adj->list); 4752 pr_debug("dev_put for %s, because link removed from %s to %s\n", 4753 adj_dev->name, dev->name, adj_dev->name); 4754 dev_put(adj_dev); 4755 kfree_rcu(adj, rcu); 4756 } 4757 4758 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 4759 struct net_device *upper_dev, 4760 struct list_head *up_list, 4761 struct list_head *down_list, 4762 void *private, bool master) 4763 { 4764 int ret; 4765 4766 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private, 4767 master); 4768 if (ret) 4769 return ret; 4770 4771 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private, 4772 false); 4773 if (ret) { 4774 __netdev_adjacent_dev_remove(dev, upper_dev, up_list); 4775 return ret; 4776 } 4777 4778 return 0; 4779 } 4780 4781 static int __netdev_adjacent_dev_link(struct net_device *dev, 4782 struct net_device *upper_dev) 4783 { 4784 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 4785 &dev->all_adj_list.upper, 4786 &upper_dev->all_adj_list.lower, 4787 NULL, false); 4788 } 4789 4790 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 4791 struct net_device *upper_dev, 4792 struct list_head *up_list, 4793 struct list_head *down_list) 4794 { 4795 __netdev_adjacent_dev_remove(dev, upper_dev, up_list); 4796 __netdev_adjacent_dev_remove(upper_dev, dev, down_list); 4797 } 4798 4799 static void __netdev_adjacent_dev_unlink(struct net_device *dev, 4800 struct net_device *upper_dev) 4801 { 4802 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 4803 &dev->all_adj_list.upper, 4804 &upper_dev->all_adj_list.lower); 4805 } 4806 4807 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 4808 struct net_device *upper_dev, 4809 void *private, bool master) 4810 { 4811 int ret = __netdev_adjacent_dev_link(dev, upper_dev); 4812 4813 if (ret) 4814 return ret; 4815 4816 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 4817 &dev->adj_list.upper, 4818 &upper_dev->adj_list.lower, 4819 private, master); 4820 if (ret) { 4821 __netdev_adjacent_dev_unlink(dev, upper_dev); 4822 return ret; 4823 } 4824 4825 return 0; 4826 } 4827 4828 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 4829 struct net_device *upper_dev) 4830 { 4831 __netdev_adjacent_dev_unlink(dev, 
upper_dev); 4832 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 4833 &dev->adj_list.upper, 4834 &upper_dev->adj_list.lower); 4835 } 4836 4837 static int __netdev_upper_dev_link(struct net_device *dev, 4838 struct net_device *upper_dev, bool master, 4839 void *private) 4840 { 4841 struct netdev_adjacent *i, *j, *to_i, *to_j; 4842 int ret = 0; 4843 4844 ASSERT_RTNL(); 4845 4846 if (dev == upper_dev) 4847 return -EBUSY; 4848 4849 /* To prevent loops, check that dev is not an upper device of upper_dev. */ 4850 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper)) 4851 return -EBUSY; 4852 4853 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper)) 4854 return -EEXIST; 4855 4856 if (master && netdev_master_upper_dev_get(dev)) 4857 return -EBUSY; 4858 4859 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private, 4860 master); 4861 if (ret) 4862 return ret; 4863 4864 /* Now that we linked these devs, make all of the upper_dev's 4865 * all_adj_list.upper visible to every dev's all_adj_list.lower and 4866 * vice versa, and don't forget the devices themselves. All of these 4867 * links are non-neighbours. 4868 */ 4869 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 4870 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { 4871 pr_debug("Interlinking %s with %s, non-neighbour\n", 4872 i->dev->name, j->dev->name); 4873 ret = __netdev_adjacent_dev_link(i->dev, j->dev); 4874 if (ret) 4875 goto rollback_mesh; 4876 } 4877 } 4878 4879 /* add dev to every upper_dev's upper device */ 4880 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { 4881 pr_debug("linking %s's upper device %s with %s\n", 4882 upper_dev->name, i->dev->name, dev->name); 4883 ret = __netdev_adjacent_dev_link(dev, i->dev); 4884 if (ret) 4885 goto rollback_upper_mesh; 4886 } 4887 4888 /* add upper_dev to every dev's lower device */ 4889 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 4890 pr_debug("linking %s's lower device %s with %s\n", dev->name, 4891 i->dev->name, upper_dev->name); 4892 ret = __netdev_adjacent_dev_link(i->dev, upper_dev); 4893 if (ret) 4894 goto rollback_lower_mesh; 4895 } 4896 4897 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); 4898 return 0; 4899 4900 rollback_lower_mesh: 4901 to_i = i; 4902 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 4903 if (i == to_i) 4904 break; 4905 __netdev_adjacent_dev_unlink(i->dev, upper_dev); 4906 } 4907 4908 i = NULL; 4909 4910 rollback_upper_mesh: 4911 to_i = i; 4912 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { 4913 if (i == to_i) 4914 break; 4915 __netdev_adjacent_dev_unlink(dev, i->dev); 4916 } 4917 4918 i = j = NULL; 4919 4920 rollback_mesh: 4921 to_i = i; 4922 to_j = j; 4923 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 4924 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { 4925 if (i == to_i && j == to_j) 4926 break; 4927 __netdev_adjacent_dev_unlink(i->dev, j->dev); 4928 } 4929 if (i == to_i) 4930 break; 4931 } 4932 4933 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 4934 4935 return ret; 4936 } 4937 4938 /** 4939 * netdev_upper_dev_link - Add a link to the upper device 4940 * @dev: device 4941 * @upper_dev: new upper device 4942 * 4943 * Adds a link to device which is upper to this one. The caller must hold 4944 * the RTNL lock. On a failure a negative errno code is returned. 4945 * On success the reference counts are adjusted and the function 4946 * returns zero.
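 *
 * Minimal sketch of the usual pairing (hypothetical caller, under
 * rtnl_lock(); port_dev and upper_dev are placeholders):
 *
 *	err = netdev_upper_dev_link(port_dev, upper_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(port_dev, upper_dev);	(on teardown)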
4947 */ 4948 int netdev_upper_dev_link(struct net_device *dev, 4949 struct net_device *upper_dev) 4950 { 4951 return __netdev_upper_dev_link(dev, upper_dev, false, NULL); 4952 } 4953 EXPORT_SYMBOL(netdev_upper_dev_link); 4954 4955 /** 4956 * netdev_master_upper_dev_link - Add a master link to the upper device 4957 * @dev: device 4958 * @upper_dev: new upper device 4959 * 4960 * Adds a link to device which is upper to this one. In this case, only 4961 * one master upper device can be linked, although other non-master devices 4962 * might be linked as well. The caller must hold the RTNL lock. 4963 * On a failure a negative errno code is returned. On success the reference 4964 * counts are adjusted and the function returns zero. 4965 */ 4966 int netdev_master_upper_dev_link(struct net_device *dev, 4967 struct net_device *upper_dev) 4968 { 4969 return __netdev_upper_dev_link(dev, upper_dev, true, NULL); 4970 } 4971 EXPORT_SYMBOL(netdev_master_upper_dev_link); 4972 4973 int netdev_master_upper_dev_link_private(struct net_device *dev, 4974 struct net_device *upper_dev, 4975 void *private) 4976 { 4977 return __netdev_upper_dev_link(dev, upper_dev, true, private); 4978 } 4979 EXPORT_SYMBOL(netdev_master_upper_dev_link_private); 4980 4981 /** 4982 * netdev_upper_dev_unlink - Removes a link to upper device 4983 * @dev: device 4984 * @upper_dev: upper device to remove 4985 * 4986 * Removes a link to device which is upper to this one. The caller must hold 4987 * the RTNL lock. 4988 */ 4989 void netdev_upper_dev_unlink(struct net_device *dev, 4990 struct net_device *upper_dev) 4991 { 4992 struct netdev_adjacent *i, *j; 4993 ASSERT_RTNL(); 4994 4995 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 4996 4997 /* Here is the tricky part. We must remove all dev's lower 4998 * devices from all upper_dev's upper devices and vice 4999 * versa, to maintain the graph relationship.
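 * This undoes, link by link, the mesh that __netdev_upper_dev_link()
 * built when the link was created.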
5000 */ 5001 list_for_each_entry(i, &dev->all_adj_list.lower, list) 5002 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) 5003 __netdev_adjacent_dev_unlink(i->dev, j->dev); 5004 5005 /* also remove the devices themselves from the lower/upper device 5006 * lists 5007 */ 5008 list_for_each_entry(i, &dev->all_adj_list.lower, list) 5009 __netdev_adjacent_dev_unlink(i->dev, upper_dev); 5010 5011 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) 5012 __netdev_adjacent_dev_unlink(dev, i->dev); 5013 5014 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); 5015 } 5016 EXPORT_SYMBOL(netdev_upper_dev_unlink); 5017 5018 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 5019 { 5020 struct netdev_adjacent *iter; 5021 5022 list_for_each_entry(iter, &dev->adj_list.upper, list) { 5023 netdev_adjacent_sysfs_del(iter->dev, oldname, 5024 &iter->dev->adj_list.lower); 5025 netdev_adjacent_sysfs_add(iter->dev, dev, 5026 &iter->dev->adj_list.lower); 5027 } 5028 5029 list_for_each_entry(iter, &dev->adj_list.lower, list) { 5030 netdev_adjacent_sysfs_del(iter->dev, oldname, 5031 &iter->dev->adj_list.upper); 5032 netdev_adjacent_sysfs_add(iter->dev, dev, 5033 &iter->dev->adj_list.upper); 5034 } 5035 } 5036 5037 void *netdev_lower_dev_get_private(struct net_device *dev, 5038 struct net_device *lower_dev) 5039 { 5040 struct netdev_adjacent *lower; 5041 5042 if (!lower_dev) 5043 return NULL; 5044 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower); 5045 if (!lower) 5046 return NULL; 5047 5048 return lower->private; 5049 } 5050 EXPORT_SYMBOL(netdev_lower_dev_get_private); 5051 5052 static void dev_change_rx_flags(struct net_device *dev, int flags) 5053 { 5054 const struct net_device_ops *ops = dev->netdev_ops; 5055 5056 if (ops->ndo_change_rx_flags) 5057 ops->ndo_change_rx_flags(dev, flags); 5058 } 5059 5060 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 5061 { 5062 unsigned int old_flags = dev->flags; 5063 kuid_t uid; 5064 kgid_t gid; 5065 5066 ASSERT_RTNL(); 5067 5068 dev->flags |= IFF_PROMISC; 5069 dev->promiscuity += inc; 5070 if (dev->promiscuity == 0) { 5071 /* 5072 * Avoid overflow. 5073 * If inc causes overflow, untouch promisc and return error. 5074 */ 5075 if (inc < 0) 5076 dev->flags &= ~IFF_PROMISC; 5077 else { 5078 dev->promiscuity -= inc; 5079 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 5080 dev->name); 5081 return -EOVERFLOW; 5082 } 5083 } 5084 if (dev->flags != old_flags) { 5085 pr_info("device %s %s promiscuous mode\n", 5086 dev->name, 5087 dev->flags & IFF_PROMISC ? "entered" : "left"); 5088 if (audit_enabled) { 5089 current_uid_gid(&uid, &gid); 5090 audit_log(current->audit_context, GFP_ATOMIC, 5091 AUDIT_ANOM_PROMISCUOUS, 5092 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 5093 dev->name, (dev->flags & IFF_PROMISC), 5094 (old_flags & IFF_PROMISC), 5095 from_kuid(&init_user_ns, audit_get_loginuid(current)), 5096 from_kuid(&init_user_ns, uid), 5097 from_kgid(&init_user_ns, gid), 5098 audit_get_sessionid(current)); 5099 } 5100 5101 dev_change_rx_flags(dev, IFF_PROMISC); 5102 } 5103 if (notify) 5104 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 5105 return 0; 5106 } 5107 5108 /** 5109 * dev_set_promiscuity - update promiscuity count on a device 5110 * @dev: device 5111 * @inc: modifier 5112 * 5113 * Add or remove promiscuity from a device. While the count in the device 5114 * remains above zero the interface remains promiscuous.
Once it hits zero 5115 * the device reverts back to normal filtering operation. A negative inc 5116 * value is used to drop promiscuity on the device. 5117 * Return 0 if successful or a negative errno code on error. 5118 */ 5119 int dev_set_promiscuity(struct net_device *dev, int inc) 5120 { 5121 unsigned int old_flags = dev->flags; 5122 int err; 5123 5124 err = __dev_set_promiscuity(dev, inc, true); 5125 if (err < 0) 5126 return err; 5127 if (dev->flags != old_flags) 5128 dev_set_rx_mode(dev); 5129 return err; 5130 } 5131 EXPORT_SYMBOL(dev_set_promiscuity); 5132 5133 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 5134 { 5135 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 5136 5137 ASSERT_RTNL(); 5138 5139 dev->flags |= IFF_ALLMULTI; 5140 dev->allmulti += inc; 5141 if (dev->allmulti == 0) { 5142 /* 5143 * Avoid overflow. 5144 * If inc causes overflow, untouch allmulti and return error. 5145 */ 5146 if (inc < 0) 5147 dev->flags &= ~IFF_ALLMULTI; 5148 else { 5149 dev->allmulti -= inc; 5150 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 5151 dev->name); 5152 return -EOVERFLOW; 5153 } 5154 } 5155 if (dev->flags ^ old_flags) { 5156 dev_change_rx_flags(dev, IFF_ALLMULTI); 5157 dev_set_rx_mode(dev); 5158 if (notify) 5159 __dev_notify_flags(dev, old_flags, 5160 dev->gflags ^ old_gflags); 5161 } 5162 return 0; 5163 } 5164 5165 /** 5166 * dev_set_allmulti - update allmulti count on a device 5167 * @dev: device 5168 * @inc: modifier 5169 * 5170 * Add or remove reception of all multicast frames to a device. While the 5171 * count in the device remains above zero the interface remains listening 5172 * to all multicast frames. Once it hits zero the device reverts back to normal 5173 * filtering operation. A negative @inc value is used to drop the counter 5174 * when releasing a resource needing all multicasts. 5175 * Return 0 if successful or a negative errno code on error. 5176 */ 5177 5178 int dev_set_allmulti(struct net_device *dev, int inc) 5179 { 5180 return __dev_set_allmulti(dev, inc, true); 5181 } 5182 EXPORT_SYMBOL(dev_set_allmulti); 5183 5184 /* 5185 * Upload unicast and multicast address lists to device and 5186 * configure RX filtering. When the device doesn't support unicast 5187 * filtering it is put in promiscuous mode while unicast addresses 5188 * are present. 5189 */ 5190 void __dev_set_rx_mode(struct net_device *dev) 5191 { 5192 const struct net_device_ops *ops = dev->netdev_ops; 5193 5194 /* dev_open will call this function so the list will stay sane. */ 5195 if (!(dev->flags&IFF_UP)) 5196 return; 5197 5198 if (!netif_device_present(dev)) 5199 return; 5200 5201 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 5202 /* Unicast address changes may only happen under the rtnl, 5203 * therefore calling __dev_set_promiscuity here is safe.
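 * (__dev_set_promiscuity() enforces this with ASSERT_RTNL().)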
5204 */ 5205 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 5206 __dev_set_promiscuity(dev, 1, false); 5207 dev->uc_promisc = true; 5208 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 5209 __dev_set_promiscuity(dev, -1, false); 5210 dev->uc_promisc = false; 5211 } 5212 } 5213 5214 if (ops->ndo_set_rx_mode) 5215 ops->ndo_set_rx_mode(dev); 5216 } 5217 5218 void dev_set_rx_mode(struct net_device *dev) 5219 { 5220 netif_addr_lock_bh(dev); 5221 __dev_set_rx_mode(dev); 5222 netif_addr_unlock_bh(dev); 5223 } 5224 5225 /** 5226 * dev_get_flags - get flags reported to userspace 5227 * @dev: device 5228 * 5229 * Get the combination of flag bits exported through APIs to userspace. 5230 */ 5231 unsigned int dev_get_flags(const struct net_device *dev) 5232 { 5233 unsigned int flags; 5234 5235 flags = (dev->flags & ~(IFF_PROMISC | 5236 IFF_ALLMULTI | 5237 IFF_RUNNING | 5238 IFF_LOWER_UP | 5239 IFF_DORMANT)) | 5240 (dev->gflags & (IFF_PROMISC | 5241 IFF_ALLMULTI)); 5242 5243 if (netif_running(dev)) { 5244 if (netif_oper_up(dev)) 5245 flags |= IFF_RUNNING; 5246 if (netif_carrier_ok(dev)) 5247 flags |= IFF_LOWER_UP; 5248 if (netif_dormant(dev)) 5249 flags |= IFF_DORMANT; 5250 } 5251 5252 return flags; 5253 } 5254 EXPORT_SYMBOL(dev_get_flags); 5255 5256 int __dev_change_flags(struct net_device *dev, unsigned int flags) 5257 { 5258 unsigned int old_flags = dev->flags; 5259 int ret; 5260 5261 ASSERT_RTNL(); 5262 5263 /* 5264 * Set the flags on our device. 5265 */ 5266 5267 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 5268 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 5269 IFF_AUTOMEDIA)) | 5270 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 5271 IFF_ALLMULTI)); 5272 5273 /* 5274 * Load in the correct multicast list now the flags have changed. 5275 */ 5276 5277 if ((old_flags ^ flags) & IFF_MULTICAST) 5278 dev_change_rx_flags(dev, IFF_MULTICAST); 5279 5280 dev_set_rx_mode(dev); 5281 5282 /* 5283 * Have we downed the interface? We handle IFF_UP ourselves 5284 * according to user attempts to set it, rather than blindly 5285 * setting it. 5286 */ 5287 5288 ret = 0; 5289 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ 5290 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); 5291 5292 if (!ret) 5293 dev_set_rx_mode(dev); 5294 } 5295 5296 if ((flags ^ dev->gflags) & IFF_PROMISC) { 5297 int inc = (flags & IFF_PROMISC) ? 1 : -1; 5298 unsigned int old_flags = dev->flags; 5299 5300 dev->gflags ^= IFF_PROMISC; 5301 5302 if (__dev_set_promiscuity(dev, inc, false) >= 0) 5303 if (dev->flags != old_flags) 5304 dev_set_rx_mode(dev); 5305 } 5306 5307 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI 5308 is important. Some (broken) drivers set IFF_PROMISC when 5309 IFF_ALLMULTI is requested, without asking us and without reporting it. 5310 */ 5311 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 5312 int inc = (flags & IFF_ALLMULTI) ?
1 : -1; 5313 5314 dev->gflags ^= IFF_ALLMULTI; 5315 __dev_set_allmulti(dev, inc, false); 5316 } 5317 5318 return ret; 5319 } 5320 5321 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 5322 unsigned int gchanges) 5323 { 5324 unsigned int changes = dev->flags ^ old_flags; 5325 5326 if (gchanges) 5327 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 5328 5329 if (changes & IFF_UP) { 5330 if (dev->flags & IFF_UP) 5331 call_netdevice_notifiers(NETDEV_UP, dev); 5332 else 5333 call_netdevice_notifiers(NETDEV_DOWN, dev); 5334 } 5335 5336 if (dev->flags & IFF_UP && 5337 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 5338 struct netdev_notifier_change_info change_info; 5339 5340 change_info.flags_changed = changes; 5341 call_netdevice_notifiers_info(NETDEV_CHANGE, dev, 5342 &change_info.info); 5343 } 5344 } 5345 5346 /** 5347 * dev_change_flags - change device settings 5348 * @dev: device 5349 * @flags: device state flags 5350 * 5351 * Change settings on a device based on state flags. The flags are 5352 * in the userspace exported format. 5353 */ 5354 int dev_change_flags(struct net_device *dev, unsigned int flags) 5355 { 5356 int ret; 5357 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 5358 5359 ret = __dev_change_flags(dev, flags); 5360 if (ret < 0) 5361 return ret; 5362 5363 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 5364 __dev_notify_flags(dev, old_flags, changes); 5365 return ret; 5366 } 5367 EXPORT_SYMBOL(dev_change_flags); 5368 5369 static int __dev_set_mtu(struct net_device *dev, int new_mtu) 5370 { 5371 const struct net_device_ops *ops = dev->netdev_ops; 5372 5373 if (ops->ndo_change_mtu) 5374 return ops->ndo_change_mtu(dev, new_mtu); 5375 5376 dev->mtu = new_mtu; 5377 return 0; 5378 } 5379 5380 /** 5381 * dev_set_mtu - Change maximum transfer unit 5382 * @dev: device 5383 * @new_mtu: new transfer unit 5384 * 5385 * Change the maximum transfer size of the network device. 5386 */ 5387 int dev_set_mtu(struct net_device *dev, int new_mtu) 5388 { 5389 int err, orig_mtu; 5390 5391 if (new_mtu == dev->mtu) 5392 return 0; 5393 5394 /* MTU must be positive. */ 5395 if (new_mtu < 0) 5396 return -EINVAL; 5397 5398 if (!netif_device_present(dev)) 5399 return -ENODEV; 5400 5401 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 5402 err = notifier_to_errno(err); 5403 if (err) 5404 return err; 5405 5406 orig_mtu = dev->mtu; 5407 err = __dev_set_mtu(dev, new_mtu); 5408 5409 if (!err) { 5410 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 5411 err = notifier_to_errno(err); 5412 if (err) { 5413 /* setting mtu back and notifying everyone again, 5414 * so that they have a chance to revert changes.
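 * (i.e. restore orig_mtu and send a second NETDEV_CHANGEMTU)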
5415 */ 5416 __dev_set_mtu(dev, orig_mtu); 5417 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 5418 } 5419 } 5420 return err; 5421 } 5422 EXPORT_SYMBOL(dev_set_mtu); 5423 5424 /** 5425 * dev_set_group - Change group this device belongs to 5426 * @dev: device 5427 * @new_group: group this device should belong to 5428 */ 5429 void dev_set_group(struct net_device *dev, int new_group) 5430 { 5431 dev->group = new_group; 5432 } 5433 EXPORT_SYMBOL(dev_set_group); 5434 5435 /** 5436 * dev_set_mac_address - Change Media Access Control Address 5437 * @dev: device 5438 * @sa: new address 5439 * 5440 * Change the hardware (MAC) address of the device 5441 */ 5442 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 5443 { 5444 const struct net_device_ops *ops = dev->netdev_ops; 5445 int err; 5446 5447 if (!ops->ndo_set_mac_address) 5448 return -EOPNOTSUPP; 5449 if (sa->sa_family != dev->type) 5450 return -EINVAL; 5451 if (!netif_device_present(dev)) 5452 return -ENODEV; 5453 err = ops->ndo_set_mac_address(dev, sa); 5454 if (err) 5455 return err; 5456 dev->addr_assign_type = NET_ADDR_SET; 5457 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 5458 add_device_randomness(dev->dev_addr, dev->addr_len); 5459 return 0; 5460 } 5461 EXPORT_SYMBOL(dev_set_mac_address); 5462 5463 /** 5464 * dev_change_carrier - Change device carrier 5465 * @dev: device 5466 * @new_carrier: new value 5467 * 5468 * Change device carrier 5469 */ 5470 int dev_change_carrier(struct net_device *dev, bool new_carrier) 5471 { 5472 const struct net_device_ops *ops = dev->netdev_ops; 5473 5474 if (!ops->ndo_change_carrier) 5475 return -EOPNOTSUPP; 5476 if (!netif_device_present(dev)) 5477 return -ENODEV; 5478 return ops->ndo_change_carrier(dev, new_carrier); 5479 } 5480 EXPORT_SYMBOL(dev_change_carrier); 5481 5482 /** 5483 * dev_get_phys_port_id - Get device physical port ID 5484 * @dev: device 5485 * @ppid: port ID 5486 * 5487 * Get device physical port ID 5488 */ 5489 int dev_get_phys_port_id(struct net_device *dev, 5490 struct netdev_phys_port_id *ppid) 5491 { 5492 const struct net_device_ops *ops = dev->netdev_ops; 5493 5494 if (!ops->ndo_get_phys_port_id) 5495 return -EOPNOTSUPP; 5496 return ops->ndo_get_phys_port_id(dev, ppid); 5497 } 5498 EXPORT_SYMBOL(dev_get_phys_port_id); 5499 5500 /** 5501 * dev_new_index - allocate an ifindex 5502 * @net: the applicable net namespace 5503 * 5504 * Returns a suitable unique value for a new device interface 5505 * number. The caller must hold the rtnl semaphore or the 5506 * dev_base_lock to be sure it remains unique. 5507 */ 5508 static int dev_new_index(struct net *net) 5509 { 5510 int ifindex = net->ifindex; 5511 for (;;) { 5512 if (++ifindex <= 0) 5513 ifindex = 1; 5514 if (!__dev_get_by_index(net, ifindex)) 5515 return net->ifindex = ifindex; 5516 } 5517 } 5518 5519 /* Delayed registration/unregistration */ 5520 static LIST_HEAD(net_todo_list); 5521 static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 5522 5523 static void net_set_todo(struct net_device *dev) 5524 { 5525 list_add_tail(&dev->todo_list, &net_todo_list); 5526 dev_net(dev)->dev_unreg_count++; 5527 } 5528 5529 static void rollback_registered_many(struct list_head *head) 5530 { 5531 struct net_device *dev, *tmp; 5532 LIST_HEAD(close_head); 5533 5534 BUG_ON(dev_boot_phase); 5535 ASSERT_RTNL(); 5536 5537 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 5538 /* Some devices call without registering 5539 * for initialization unwind. Remove those 5540 * devices and proceed with the remaining.
5541 */ 5542 if (dev->reg_state == NETREG_UNINITIALIZED) { 5543 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 5544 dev->name, dev); 5545 5546 WARN_ON(1); 5547 list_del(&dev->unreg_list); 5548 continue; 5549 } 5550 dev->dismantle = true; 5551 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5552 } 5553 5554 /* If device is running, close it first. */ 5555 list_for_each_entry(dev, head, unreg_list) 5556 list_add_tail(&dev->close_list, &close_head); 5557 dev_close_many(&close_head); 5558 5559 list_for_each_entry(dev, head, unreg_list) { 5560 /* And unlink it from device chain. */ 5561 unlist_netdevice(dev); 5562 5563 dev->reg_state = NETREG_UNREGISTERING; 5564 } 5565 5566 synchronize_net(); 5567 5568 list_for_each_entry(dev, head, unreg_list) { 5569 /* Shutdown queueing discipline. */ 5570 dev_shutdown(dev); 5571 5572 5573 /* Notify protocols, that we are about to destroy 5574 this device. They should clean all the things. 5575 */ 5576 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5577 5578 if (!dev->rtnl_link_ops || 5579 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5580 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); 5581 5582 /* 5583 * Flush the unicast and multicast chains 5584 */ 5585 dev_uc_flush(dev); 5586 dev_mc_flush(dev); 5587 5588 if (dev->netdev_ops->ndo_uninit) 5589 dev->netdev_ops->ndo_uninit(dev); 5590 5591 /* Notifier chain MUST detach us all upper devices. */ 5592 WARN_ON(netdev_has_any_upper_dev(dev)); 5593 5594 /* Remove entries from kobject tree */ 5595 netdev_unregister_kobject(dev); 5596 #ifdef CONFIG_XPS 5597 /* Remove XPS queueing entries */ 5598 netif_reset_xps_queues_gt(dev, 0); 5599 #endif 5600 } 5601 5602 synchronize_net(); 5603 5604 list_for_each_entry(dev, head, unreg_list) 5605 dev_put(dev); 5606 } 5607 5608 static void rollback_registered(struct net_device *dev) 5609 { 5610 LIST_HEAD(single); 5611 5612 list_add(&dev->unreg_list, &single); 5613 rollback_registered_many(&single); 5614 list_del(&single); 5615 } 5616 5617 static netdev_features_t netdev_fix_features(struct net_device *dev, 5618 netdev_features_t features) 5619 { 5620 /* Fix illegal checksum combinations */ 5621 if ((features & NETIF_F_HW_CSUM) && 5622 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5623 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 5624 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5625 } 5626 5627 /* TSO requires that SG is present as well. */ 5628 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5629 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 5630 features &= ~NETIF_F_ALL_TSO; 5631 } 5632 5633 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 5634 !(features & NETIF_F_IP_CSUM)) { 5635 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 5636 features &= ~NETIF_F_TSO; 5637 features &= ~NETIF_F_TSO_ECN; 5638 } 5639 5640 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 5641 !(features & NETIF_F_IPV6_CSUM)) { 5642 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 5643 features &= ~NETIF_F_TSO6; 5644 } 5645 5646 /* TSO ECN requires that TSO is present as well. */ 5647 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 5648 features &= ~NETIF_F_TSO_ECN; 5649 5650 /* Software GSO depends on SG. 
*/ 5651 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5652 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5653 features &= ~NETIF_F_GSO; 5654 } 5655 5656 /* UFO needs SG and checksumming */ 5657 if (features & NETIF_F_UFO) { 5658 /* maybe split UFO into V4 and V6? */ 5659 if (!((features & NETIF_F_GEN_CSUM) || 5660 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5661 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5662 netdev_dbg(dev, 5663 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5664 features &= ~NETIF_F_UFO; 5665 } 5666 5667 if (!(features & NETIF_F_SG)) { 5668 netdev_dbg(dev, 5669 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5670 features &= ~NETIF_F_UFO; 5671 } 5672 } 5673 5674 return features; 5675 } 5676 5677 int __netdev_update_features(struct net_device *dev) 5678 { 5679 netdev_features_t features; 5680 int err = 0; 5681 5682 ASSERT_RTNL(); 5683 5684 features = netdev_get_wanted_features(dev); 5685 5686 if (dev->netdev_ops->ndo_fix_features) 5687 features = dev->netdev_ops->ndo_fix_features(dev, features); 5688 5689 /* driver might be less strict about feature dependencies */ 5690 features = netdev_fix_features(dev, features); 5691 5692 if (dev->features == features) 5693 return 0; 5694 5695 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 5696 &dev->features, &features); 5697 5698 if (dev->netdev_ops->ndo_set_features) 5699 err = dev->netdev_ops->ndo_set_features(dev, features); 5700 5701 if (unlikely(err < 0)) { 5702 netdev_err(dev, 5703 "set_features() failed (%d); wanted %pNF, left %pNF\n", 5704 err, &features, &dev->features); 5705 return -1; 5706 } 5707 5708 if (!err) 5709 dev->features = features; 5710 5711 return 1; 5712 } 5713 5714 /** 5715 * netdev_update_features - recalculate device features 5716 * @dev: the device to check 5717 * 5718 * Recalculate dev->features set and send notifications if it 5719 * has changed. Should be called after driver or hardware dependent 5720 * conditions that influence the features might have changed. 5721 */ 5722 void netdev_update_features(struct net_device *dev) 5723 { 5724 if (__netdev_update_features(dev)) 5725 netdev_features_change(dev); 5726 } 5727 EXPORT_SYMBOL(netdev_update_features); 5728 5729 /** 5730 * netdev_change_features - recalculate device features 5731 * @dev: the device to check 5732 * 5733 * Recalculate dev->features set and send notifications even 5734 * if they have not changed. Should be called instead of 5735 * netdev_update_features() if dev->vlan_features might also 5736 * have changed, to allow the changes to be propagated to stacked 5737 * VLAN devices. 5738 */ 5739 void netdev_change_features(struct net_device *dev) 5740 { 5741 __netdev_update_features(dev); 5742 netdev_features_change(dev); 5743 } 5744 EXPORT_SYMBOL(netdev_change_features); 5745 5746 /** 5747 * netif_stacked_transfer_operstate - transfer operstate 5748 * @rootdev: the root or lower level device to transfer state from 5749 * @dev: the device to transfer operstate to 5750 * 5751 * Transfer operational state from root to device. This is normally 5752 * called when a stacking relationship exists between the root 5753 * device and the device (a leaf device).
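 *
 * Illustrative call (hypothetical stacked driver, e.g. reacting to a
 * NETDEV_CHANGE event on its lower device; both names are placeholders):
 *
 *	netif_stacked_transfer_operstate(lower_dev, stacked_dev);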
5754 */ 5755 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5756 struct net_device *dev) 5757 { 5758 if (rootdev->operstate == IF_OPER_DORMANT) 5759 netif_dormant_on(dev); 5760 else 5761 netif_dormant_off(dev); 5762 5763 if (netif_carrier_ok(rootdev)) { 5764 if (!netif_carrier_ok(dev)) 5765 netif_carrier_on(dev); 5766 } else { 5767 if (netif_carrier_ok(dev)) 5768 netif_carrier_off(dev); 5769 } 5770 } 5771 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5772 5773 #ifdef CONFIG_SYSFS 5774 static int netif_alloc_rx_queues(struct net_device *dev) 5775 { 5776 unsigned int i, count = dev->num_rx_queues; 5777 struct netdev_rx_queue *rx; 5778 5779 BUG_ON(count < 1); 5780 5781 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5782 if (!rx) 5783 return -ENOMEM; 5784 5785 dev->_rx = rx; 5786 5787 for (i = 0; i < count; i++) 5788 rx[i].dev = dev; 5789 return 0; 5790 } 5791 #endif 5792 5793 static void netdev_init_one_queue(struct net_device *dev, 5794 struct netdev_queue *queue, void *_unused) 5795 { 5796 /* Initialize queue lock */ 5797 spin_lock_init(&queue->_xmit_lock); 5798 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 5799 queue->xmit_lock_owner = -1; 5800 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 5801 queue->dev = dev; 5802 #ifdef CONFIG_BQL 5803 dql_init(&queue->dql, HZ); 5804 #endif 5805 } 5806 5807 static void netif_free_tx_queues(struct net_device *dev) 5808 { 5809 if (is_vmalloc_addr(dev->_tx)) 5810 vfree(dev->_tx); 5811 else 5812 kfree(dev->_tx); 5813 } 5814 5815 static int netif_alloc_netdev_queues(struct net_device *dev) 5816 { 5817 unsigned int count = dev->num_tx_queues; 5818 struct netdev_queue *tx; 5819 size_t sz = count * sizeof(*tx); 5820 5821 BUG_ON(count < 1 || count > 0xffff); 5822 5823 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 5824 if (!tx) { 5825 tx = vzalloc(sz); 5826 if (!tx) 5827 return -ENOMEM; 5828 } 5829 dev->_tx = tx; 5830 5831 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5832 spin_lock_init(&dev->tx_global_lock); 5833 5834 return 0; 5835 } 5836 5837 /** 5838 * register_netdevice - register a network device 5839 * @dev: device to register 5840 * 5841 * Take a completed network device structure and add it to the kernel 5842 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5843 * chain. 0 is returned on success. A negative errno code is returned 5844 * on a failure to set up the device, or if the name is a duplicate. 5845 * 5846 * Callers must hold the rtnl semaphore. You may want 5847 * register_netdev() instead of this. 5848 * 5849 * BUGS: 5850 * The locking appears insufficient to guarantee two parallel registers 5851 * will not get the same name. 5852 */ 5853 5854 int register_netdevice(struct net_device *dev) 5855 { 5856 int ret; 5857 struct net *net = dev_net(dev); 5858 5859 BUG_ON(dev_boot_phase); 5860 ASSERT_RTNL(); 5861 5862 might_sleep(); 5863 5864 /* When net_device's are persistent, this will be fatal. 
*/ 5865 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 5866 BUG_ON(!net); 5867 5868 spin_lock_init(&dev->addr_list_lock); 5869 netdev_set_addr_lockdep_class(dev); 5870 5871 dev->iflink = -1; 5872 5873 ret = dev_get_valid_name(net, dev, dev->name); 5874 if (ret < 0) 5875 goto out; 5876 5877 /* Init, if this function is available */ 5878 if (dev->netdev_ops->ndo_init) { 5879 ret = dev->netdev_ops->ndo_init(dev); 5880 if (ret) { 5881 if (ret > 0) 5882 ret = -EIO; 5883 goto out; 5884 } 5885 } 5886 5887 if (((dev->hw_features | dev->features) & 5888 NETIF_F_HW_VLAN_CTAG_FILTER) && 5889 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 5890 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 5891 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 5892 ret = -EINVAL; 5893 goto err_uninit; 5894 } 5895 5896 ret = -EBUSY; 5897 if (!dev->ifindex) 5898 dev->ifindex = dev_new_index(net); 5899 else if (__dev_get_by_index(net, dev->ifindex)) 5900 goto err_uninit; 5901 5902 if (dev->iflink == -1) 5903 dev->iflink = dev->ifindex; 5904 5905 /* Transfer changeable features to wanted_features and enable 5906 * software offloads (GSO and GRO). 5907 */ 5908 dev->hw_features |= NETIF_F_SOFT_FEATURES; 5909 dev->features |= NETIF_F_SOFT_FEATURES; 5910 dev->wanted_features = dev->features & dev->hw_features; 5911 5912 if (!(dev->flags & IFF_LOOPBACK)) { 5913 dev->hw_features |= NETIF_F_NOCACHE_COPY; 5914 } 5915 5916 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 5917 */ 5918 dev->vlan_features |= NETIF_F_HIGHDMA; 5919 5920 /* Make NETIF_F_SG inheritable to tunnel devices. 5921 */ 5922 dev->hw_enc_features |= NETIF_F_SG; 5923 5924 /* Make NETIF_F_SG inheritable to MPLS. 5925 */ 5926 dev->mpls_features |= NETIF_F_SG; 5927 5928 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5929 ret = notifier_to_errno(ret); 5930 if (ret) 5931 goto err_uninit; 5932 5933 ret = netdev_register_kobject(dev); 5934 if (ret) 5935 goto err_uninit; 5936 dev->reg_state = NETREG_REGISTERED; 5937 5938 __netdev_update_features(dev); 5939 5940 /* 5941 * Default initial state at registry is that the 5942 * device is present. 5943 */ 5944 5945 set_bit(__LINK_STATE_PRESENT, &dev->state); 5946 5947 linkwatch_init_dev(dev); 5948 5949 dev_init_scheduler(dev); 5950 dev_hold(dev); 5951 list_netdevice(dev); 5952 add_device_randomness(dev->dev_addr, dev->addr_len); 5953 5954 /* If the device has permanent device address, driver should 5955 * set dev_addr and also addr_assign_type should be set to 5956 * NET_ADDR_PERM (default value). 5957 */ 5958 if (dev->addr_assign_type == NET_ADDR_PERM) 5959 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5960 5961 /* Notify protocols, that a new device appeared. */ 5962 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 5963 ret = notifier_to_errno(ret); 5964 if (ret) { 5965 rollback_registered(dev); 5966 dev->reg_state = NETREG_UNREGISTERED; 5967 } 5968 /* 5969 * Prevent userspace races by waiting until the network 5970 * device is fully setup before sending notifications. 
5971 */ 5972 if (!dev->rtnl_link_ops || 5973 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5974 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 5975 5976 out: 5977 return ret; 5978 5979 err_uninit: 5980 if (dev->netdev_ops->ndo_uninit) 5981 dev->netdev_ops->ndo_uninit(dev); 5982 goto out; 5983 } 5984 EXPORT_SYMBOL(register_netdevice); 5985 5986 /** 5987 * init_dummy_netdev - init a dummy network device for NAPI 5988 * @dev: device to init 5989 * 5990 * This takes a network device structure and initializes the minimum 5991 * number of fields so it can be used to schedule NAPI polls without 5992 * registering a full blown interface. This is to be used by drivers 5993 * that need to tie several hardware interfaces to a single NAPI 5994 * poll scheduler due to HW limitations. 5995 */ 5996 int init_dummy_netdev(struct net_device *dev) 5997 { 5998 /* Clear everything. Note we don't initialize spinlocks 5999 * as they aren't supposed to be taken by any of the 6000 * NAPI code and this dummy netdev is supposed to be 6001 * only ever used for NAPI polls 6002 */ 6003 memset(dev, 0, sizeof(struct net_device)); 6004 6005 /* make sure we BUG if trying to hit standard 6006 * register/unregister code path 6007 */ 6008 dev->reg_state = NETREG_DUMMY; 6009 6010 /* NAPI wants this */ 6011 INIT_LIST_HEAD(&dev->napi_list); 6012 6013 /* a dummy interface is started by default */ 6014 set_bit(__LINK_STATE_PRESENT, &dev->state); 6015 set_bit(__LINK_STATE_START, &dev->state); 6016 6017 /* Note: We don't allocate pcpu_refcnt for dummy devices, 6018 * because users of this 'device' don't need to change 6019 * its refcount. 6020 */ 6021 6022 return 0; 6023 } 6024 EXPORT_SYMBOL_GPL(init_dummy_netdev); 6025 6026 6027 /** 6028 * register_netdev - register a network device 6029 * @dev: device to register 6030 * 6031 * Take a completed network device structure and add it to the kernel 6032 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 6033 * chain. 0 is returned on success. A negative errno code is returned 6034 * on a failure to set up the device, or if the name is a duplicate. 6035 * 6036 * This is a wrapper around register_netdevice that takes the rtnl semaphore 6037 * and expands the device name if you passed a format string to 6038 * alloc_netdev. 6039 */ 6040 int register_netdev(struct net_device *dev) 6041 { 6042 int err; 6043 6044 rtnl_lock(); 6045 err = register_netdevice(dev); 6046 rtnl_unlock(); 6047 return err; 6048 } 6049 EXPORT_SYMBOL(register_netdev); 6050 6051 int netdev_refcnt_read(const struct net_device *dev) 6052 { 6053 int i, refcnt = 0; 6054 6055 for_each_possible_cpu(i) 6056 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); 6057 return refcnt; 6058 } 6059 EXPORT_SYMBOL(netdev_refcnt_read); 6060 6061 /** 6062 * netdev_wait_allrefs - wait until all references are gone. 6063 * @dev: target net_device 6064 * 6065 * This is called when unregistering network devices. 6066 * 6067 * Any protocol or device that holds a reference should register 6068 * for netdevice notification, and clean up and put back the 6069 * reference if they receive an UNREGISTER event. 6070 * We can get stuck here if buggy protocols don't correctly 6071 * call dev_put.
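 *
 * Sketch of the expected pattern (hypothetical protocol code; the
 * foo_* names are placeholders):
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UNREGISTER && dev == foo_held_dev) {
 *			dev_put(foo_held_dev);
 *			foo_held_dev = NULL;
 *		}
 *		return NOTIFY_DONE;
 *	}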
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and clean up and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with two problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Report that a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
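/* Example (illustrative sketch): a subsystem that caches a device
 * pointer registers a netdevice notifier and releases its reference on
 * NETDEV_UNREGISTER, so netdev_wait_allrefs() above can finish; the
 * my_* names are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UNREGISTER && dev == my_cached_dev) {
 *			my_cached_dev = NULL;
 *			dev_put(dev);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */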
/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
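/* Example (illustrative sketch): callers pass their own storage, which
 * dev_get_stats() fills completely, so a stack variable is fine:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu RX packets\n", dev->name,
 *		(unsigned long long)stats.rx_packets);
 */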
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_SYSFS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->all_adj_list.upper);
	INIT_LIST_HEAD(&dev->all_adj_list.lower);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_SYSFS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kfree(dev->_rx);
#endif

free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
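/* Example (illustrative sketch): a multiqueue Ethernet driver would
 * typically reach this through the alloc_etherdev_mqs() wrapper; the
 * struct my_priv and the queue counts here are hypothetical.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */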
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice() that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice().
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
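/* Example (illustrative sketch): the usual driver teardown pairs
 * unregister_netdev(), which takes the RTNL lock itself, with a final
 * free_netdev() once the unregister has completed:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */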
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: if not NULL, name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace-local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/* And now a mini version of register_netdevice()
	 * and unregister_netdevice().
	 */

	/* If the device is running, close it first. */
	dev_close(dev);

	/* And unlink it from the device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/* Flush the unicast and multicast chains */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/* Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
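/* Example (illustrative sketch): moving a device under RTNL, falling
 * back to a "dev%d" name template on a clash, much as
 * default_device_exit() below does:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *	rtnl_unlock();
 */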
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
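/* Example (illustrative sketch): a master driver recomputes its feature
 * set by folding in each slave, roughly as the bonding driver does in
 * bond_compute_features(); the slave list and MY_FEATURE_MASK are
 * hypothetical.
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     MY_FEATURE_MASK);
 */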
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
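/* Example (illustrative sketch): drivers normally use the level
 * wrappers generated by define_netdev_printk_level() above rather than
 * calling netdev_printk() directly; the message text is hypothetical.
 *
 *	netdev_err(dev, "TX ring %d stalled\n", ring);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */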
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/* Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&netdev_unregistering_wq, &wait,
				TASK_UNINTERRUPTIBLE);
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();
		schedule();
	}
	finish_wait(&netdev_unregistering_wq, &wait);
}
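/* Example (illustrative sketch): other subsystems hook per-namespace
 * setup/teardown exactly like netdev_net_ops above; my_net_ops and its
 * callbacks are hypothetical.
 *
 *	static struct pernet_operations my_net_ops = {
 *		.init = my_net_init,
 *		.exit = my_net_exit,
 *	};
 *
 *	register_pernet_subsys(&my_net_ops);
 */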
6947 */ 6948 rtnl_lock_unregistering(net_list); 6949 list_for_each_entry(net, net_list, exit_list) { 6950 for_each_netdev_reverse(net, dev) { 6951 if (dev->rtnl_link_ops) 6952 dev->rtnl_link_ops->dellink(dev, &dev_kill_list); 6953 else 6954 unregister_netdevice_queue(dev, &dev_kill_list); 6955 } 6956 } 6957 unregister_netdevice_many(&dev_kill_list); 6958 list_del(&dev_kill_list); 6959 rtnl_unlock(); 6960 } 6961 6962 static struct pernet_operations __net_initdata default_device_ops = { 6963 .exit = default_device_exit, 6964 .exit_batch = default_device_exit_batch, 6965 }; 6966 6967 /* 6968 * Initialize the DEV module. At boot time this walks the device list and 6969 * unhooks any devices that fail to initialise (normally hardware not 6970 * present) and leaves us with a valid list of present and active devices. 6971 * 6972 */ 6973 6974 /* 6975 * This is called single threaded during boot, so no need 6976 * to take the rtnl semaphore. 6977 */ 6978 static int __init net_dev_init(void) 6979 { 6980 int i, rc = -ENOMEM; 6981 6982 BUG_ON(!dev_boot_phase); 6983 6984 if (dev_proc_init()) 6985 goto out; 6986 6987 if (netdev_kobject_init()) 6988 goto out; 6989 6990 INIT_LIST_HEAD(&ptype_all); 6991 for (i = 0; i < PTYPE_HASH_SIZE; i++) 6992 INIT_LIST_HEAD(&ptype_base[i]); 6993 6994 INIT_LIST_HEAD(&offload_base); 6995 6996 if (register_pernet_subsys(&netdev_net_ops)) 6997 goto out; 6998 6999 /* 7000 * Initialise the packet receive queues. 7001 */ 7002 7003 for_each_possible_cpu(i) { 7004 struct softnet_data *sd = &per_cpu(softnet_data, i); 7005 7006 skb_queue_head_init(&sd->input_pkt_queue); 7007 skb_queue_head_init(&sd->process_queue); 7008 INIT_LIST_HEAD(&sd->poll_list); 7009 sd->output_queue_tailp = &sd->output_queue; 7010 #ifdef CONFIG_RPS 7011 sd->csd.func = rps_trigger_softirq; 7012 sd->csd.info = sd; 7013 sd->cpu = i; 7014 #endif 7015 7016 sd->backlog.poll = process_backlog; 7017 sd->backlog.weight = weight_p; 7018 } 7019 7020 dev_boot_phase = 0; 7021 7022 /* The loopback device is special if any other network devices 7023 * is present in a network namespace the loopback device must 7024 * be present. Since we now dynamically allocate and free the 7025 * loopback device ensure this invariant is maintained by 7026 * keeping the loopback device as the first device on the 7027 * list of network devices. Ensuring the loopback devices 7028 * is the first device that appears and the last network device 7029 * that disappears. 7030 */ 7031 if (register_pernet_device(&loopback_net_ops)) 7032 goto out; 7033 7034 if (register_pernet_device(&default_device_ops)) 7035 goto out; 7036 7037 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 7038 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 7039 7040 hotcpu_notifier(dev_cpu_callback, 0); 7041 dst_init(); 7042 rc = 0; 7043 out: 7044 return rc; 7045 } 7046 7047 subsys_initcall(net_dev_init); 7048