/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here.  We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a packet-mangling protocol handler were first on
 *	the list, it could not sense that the packet is cloned and should
 *	be copied-on-write, so it would modify the packet and subsequent
 *	readers would see a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
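
/*
 * A minimal usage sketch, not part of the original file: how a module
 * might register and unregister a tap with the two calls above.  All
 * "example_*" names are hypothetical, chosen only for illustration.
 */
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* The skb may be a clone shared with other handlers: inspect,
	 * do not modify (see the ANK comment above).
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_tap __read_mostly = {
	.type	= htons(ETH_P_ALL),	/* ends up on the ptype_all list */
	.func	= example_tap_rcv,
};

static int __init example_tap_init(void)
{
	dev_add_pack(&example_tap);
	return 0;
}

static void __exit example_tap_exit(void)
{
	/* Sleeps until no CPU can still be running the handler. */
	dev_remove_pack(&example_tap);
}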

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed
 *	until it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed
 *	&packet_offload is removed from the kernel lists and can be freed
 *	or reused once this function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
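
/*
 * A hedged sketch, not part of the original file: the shape of a
 * packet_offload registration.  Real instances live elsewhere; for
 * example, net/ipv4/af_inet.c registers one for ETH_P_IP with
 * inet_gso_segment(), inet_gro_receive() and inet_gro_complete().
 * The ethertype and "example_*" names below are placeholders.
 */
static struct packet_offload example_offload __read_mostly = {
	.type = htons(0x88B5),	/* IEEE 802 local experimental ethertype */
	/* .callbacks left empty in this sketch; a real user supplies
	 * gso_segment/gro_receive/gro_complete as af_inet.c does.
	 */
};

static int __init example_offload_init(void)
{
	dev_add_offload(&example_offload);
	return 0;
}

static void __exit example_offload_exit(void)
{
	dev_remove_offload(&example_offload);	/* sleeps, like dev_remove_pack() */
}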

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
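
/*
 * Worked example (an assumption drawn from the parsing above, not taken
 * from the original file): given the kernel command line option
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * get_options() consumes the leading integers, so map.irq = 9,
 * map.base_addr = 0x300, map.mem_start = map.mem_end = 0, and the
 * remaining string "eth0" becomes the entry name that
 * netdev_boot_setup_check() later matches against dev->name.
 */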

/*******************************************************************************

		    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put() to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
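
/*
 * A minimal usage sketch, not part of the original file: the plain
 * (non-_rcu) variant takes a reference that the caller must drop with
 * dev_put().  "example_mtu_of" is a hypothetical helper.
 */
static int example_mtu_of(struct net *net, const char *name)
{
	struct net_device *dev;
	int mtu;

	dev = dev_get_by_name(net, name);	/* takes a reference */
	if (!dev)
		return -ENODEV;
	mtu = dev->mtu;
	dev_put(dev);				/* release the reference */
	return mtu;
}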

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a
 *	device is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and the result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
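
/*
 * A minimal usage sketch, not part of the original file: results of the
 * *_rcu lookups are only valid inside the rcu_read_lock() section unless
 * dev_hold() is called on them first.  "example_has_up_loopback" is a
 * hypothetical helper.
 */
static bool example_has_up_loopback(struct net *net)
{
	struct net_device *dev;
	bool up;

	rcu_read_lock();
	dev = dev_get_by_flags_rcu(net, IFF_LOOPBACK | IFF_UP,
				   IFF_LOOPBACK | IFF_UP);
	up = dev != NULL;	/* dev must not be used outside the section */
	rcu_read_unlock();
	return up;
}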

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
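
/*
 * A minimal usage sketch, not part of the original file: a driver asking
 * for kernel-chosen numbering.  The "myif%d" prefix and helper name are
 * hypothetical.
 */
static int example_name_my_dev(struct net_device *dev)
{
	int unit;

	ASSERT_RTNL();			/* keep the chosen name unique */
	unit = dev_alloc_name(dev, "myif%d");
	if (unit < 0)
		return unit;		/* -EINVAL, -ENFILE, ... */
	/* dev->name is now e.g. "myif0" and unit holds the number chosen */
	return 0;
}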

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list,
		 * it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		list_del_init(&dev->close_list);
	}

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
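
/*
 * A minimal usage sketch, not part of the original file: both calls
 * require the RTNL lock, and both are idempotent (opening an up device
 * and closing a down one are no-ops).  "example_bounce" is hypothetical.
 */
static int example_bounce(struct net_device *dev)
{
	int err;

	rtnl_lock();
	dev_close(dev);			/* always returns 0 */
	err = dev_open(dev);		/* may fail with a negative errno */
	rtnl_unlock();
	return err;
}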

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	/* the same for macvlan devices */
	if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give it a race-free view of the network
 *	device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
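
/*
 * A minimal sketch of a notifier consumer, not part of the original file:
 * netdev_notifier_info_to_dev() extracts the device from the info block
 * that call_netdevice_notifier() passes in.  The "example_*" names are
 * hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_debug("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_debug("%s is about to go down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};
/* Paired calls: register_netdevice_notifier(&example_netdev_nb) at init,
 * unregister_netdevice_notifier(&example_netdev_nb) at teardown.
 */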

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}

bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
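
/*
 * A hedged sketch, not part of the original file, modeled on what a
 * veth-like driver does: the transmit path of one device injects the skb
 * into the receive path of its peer via dev_forward_skb(), which is
 * defined just below.  "example_xmit_to_peer" is hypothetical.
 */
static netdev_tx_t example_xmit_to_peer(struct sk_buff *skb,
					struct net_device *peer)
{
	/* On NET_RX_DROP the skb has already been freed and accounted. */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		net_dbg_ratelimited("%s: peer dropped packet\n", peer->name);

	return NETDEV_TX_OK;
}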

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, zero the offending mapping. With no priorities mapping to an
 * offset/count pair it will no longer be used. In the worst case, TC0
 * is invalid and nothing can be done, so disable priority mappings
 * altogether. It is expected that drivers will fix this mapping if they
 * can before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1945 		if (!new_dev_maps) {
1946 			mutex_unlock(&xps_map_mutex);
1947 			return -ENOMEM;
1948 		}
1949 
1950 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1951 				 NULL;
1952 
1953 		map = expand_xps_map(map, cpu, index);
1954 		if (!map)
1955 			goto error;
1956 
1957 		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1958 	}
1959 
1960 	if (!new_dev_maps)
1961 		goto out_no_new_maps;
1962 
1963 	for_each_possible_cpu(cpu) {
1964 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1965 			/* add queue to CPU maps */
1966 			int pos = 0;
1967 
1968 			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1969 			while ((pos < map->len) && (map->queues[pos] != index))
1970 				pos++;
1971 
1972 			if (pos == map->len)
1973 				map->queues[map->len++] = index;
1974 #ifdef CONFIG_NUMA
1975 			if (numa_node_id == -2)
1976 				numa_node_id = cpu_to_node(cpu);
1977 			else if (numa_node_id != cpu_to_node(cpu))
1978 				numa_node_id = -1;
1979 #endif
1980 		} else if (dev_maps) {
1981 			/* fill in the new device map from the old device map */
1982 			map = xmap_dereference(dev_maps->cpu_map[cpu]);
1983 			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1984 		}
1985 
1986 	}
1987 
1988 	rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1989 
1990 	/* Cleanup old maps */
1991 	if (dev_maps) {
1992 		for_each_possible_cpu(cpu) {
1993 			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1994 			map = xmap_dereference(dev_maps->cpu_map[cpu]);
1995 			if (map && map != new_map)
1996 				kfree_rcu(map, rcu);
1997 		}
1998 
1999 		kfree_rcu(dev_maps, rcu);
2000 	}
2001 
2002 	dev_maps = new_dev_maps;
2003 	active = true;
2004 
2005 out_no_new_maps:
2006 	/* update Tx queue numa node */
2007 	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2008 				     (numa_node_id >= 0) ? numa_node_id :
2009 				     NUMA_NO_NODE);
2010 
2011 	if (!dev_maps)
2012 		goto out_no_maps;
2013 
2014 	/* remove the queue from unused CPUs */
2015 	for_each_possible_cpu(cpu) {
2016 		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2017 			continue;
2018 
2019 		if (remove_xps_queue(dev_maps, cpu, index))
2020 			active = true;
2021 	}
2022 
2023 	/* free map if not active */
2024 	if (!active) {
2025 		RCU_INIT_POINTER(dev->xps_maps, NULL);
2026 		kfree_rcu(dev_maps, rcu);
2027 	}
2028 
2029 out_no_maps:
2030 	mutex_unlock(&xps_map_mutex);
2031 
2032 	return 0;
2033 error:
2034 	/* remove any maps that we added */
2035 	for_each_possible_cpu(cpu) {
2036 		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2037 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2038 				 NULL;
2039 		if (new_map && new_map != map)
2040 			kfree(new_map);
2041 	}
2042 
2043 	mutex_unlock(&xps_map_mutex);
2044 
2045 	kfree(new_dev_maps);
2046 	return -ENOMEM;
2047 }
2048 EXPORT_SYMBOL(netif_set_xps_queue);
2049 
2050 #endif
2051 /*
2052  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2053  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
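 *
 * As an illustration only (a sketch, not code from this file), a driver
 * that discovers at runtime that fewer queues are usable would typically
 * do something like:
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, usable_txqs);
 *	rtnl_unlock();
 *
 * where "usable_txqs" is a hypothetical count not exceeding
 * dev->num_tx_queues.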
2054 */ 2055 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2056 { 2057 int rc; 2058 2059 if (txq < 1 || txq > dev->num_tx_queues) 2060 return -EINVAL; 2061 2062 if (dev->reg_state == NETREG_REGISTERED || 2063 dev->reg_state == NETREG_UNREGISTERING) { 2064 ASSERT_RTNL(); 2065 2066 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, 2067 txq); 2068 if (rc) 2069 return rc; 2070 2071 if (dev->num_tc) 2072 netif_setup_tc(dev, txq); 2073 2074 if (txq < dev->real_num_tx_queues) { 2075 qdisc_reset_all_tx_gt(dev, txq); 2076 #ifdef CONFIG_XPS 2077 netif_reset_xps_queues_gt(dev, txq); 2078 #endif 2079 } 2080 } 2081 2082 dev->real_num_tx_queues = txq; 2083 return 0; 2084 } 2085 EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2086 2087 #ifdef CONFIG_SYSFS 2088 /** 2089 * netif_set_real_num_rx_queues - set actual number of RX queues used 2090 * @dev: Network device 2091 * @rxq: Actual number of RX queues 2092 * 2093 * This must be called either with the rtnl_lock held or before 2094 * registration of the net device. Returns 0 on success, or a 2095 * negative error code. If called before registration, it always 2096 * succeeds. 2097 */ 2098 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) 2099 { 2100 int rc; 2101 2102 if (rxq < 1 || rxq > dev->num_rx_queues) 2103 return -EINVAL; 2104 2105 if (dev->reg_state == NETREG_REGISTERED) { 2106 ASSERT_RTNL(); 2107 2108 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, 2109 rxq); 2110 if (rc) 2111 return rc; 2112 } 2113 2114 dev->real_num_rx_queues = rxq; 2115 return 0; 2116 } 2117 EXPORT_SYMBOL(netif_set_real_num_rx_queues); 2118 #endif 2119 2120 /** 2121 * netif_get_num_default_rss_queues - default number of RSS queues 2122 * 2123 * This routine should set an upper limit on the number of RSS queues 2124 * used by default by multiqueue devices. 
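 *
 * Illustrative driver-side use (a sketch; "hw_max_queues" is a made-up
 * name for whatever the hardware actually supports):
 *
 *	nqueues = min_t(int, hw_max_queues,
 *			netif_get_num_default_rss_queues());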
2125 */ 2126 int netif_get_num_default_rss_queues(void) 2127 { 2128 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); 2129 } 2130 EXPORT_SYMBOL(netif_get_num_default_rss_queues); 2131 2132 static inline void __netif_reschedule(struct Qdisc *q) 2133 { 2134 struct softnet_data *sd; 2135 unsigned long flags; 2136 2137 local_irq_save(flags); 2138 sd = &__get_cpu_var(softnet_data); 2139 q->next_sched = NULL; 2140 *sd->output_queue_tailp = q; 2141 sd->output_queue_tailp = &q->next_sched; 2142 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2143 local_irq_restore(flags); 2144 } 2145 2146 void __netif_schedule(struct Qdisc *q) 2147 { 2148 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) 2149 __netif_reschedule(q); 2150 } 2151 EXPORT_SYMBOL(__netif_schedule); 2152 2153 struct dev_kfree_skb_cb { 2154 enum skb_free_reason reason; 2155 }; 2156 2157 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) 2158 { 2159 return (struct dev_kfree_skb_cb *)skb->cb; 2160 } 2161 2162 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) 2163 { 2164 unsigned long flags; 2165 2166 if (likely(atomic_read(&skb->users) == 1)) { 2167 smp_rmb(); 2168 atomic_set(&skb->users, 0); 2169 } else if (likely(!atomic_dec_and_test(&skb->users))) { 2170 return; 2171 } 2172 get_kfree_skb_cb(skb)->reason = reason; 2173 local_irq_save(flags); 2174 skb->next = __this_cpu_read(softnet_data.completion_queue); 2175 __this_cpu_write(softnet_data.completion_queue, skb); 2176 raise_softirq_irqoff(NET_TX_SOFTIRQ); 2177 local_irq_restore(flags); 2178 } 2179 EXPORT_SYMBOL(__dev_kfree_skb_irq); 2180 2181 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) 2182 { 2183 if (in_irq() || irqs_disabled()) 2184 __dev_kfree_skb_irq(skb, reason); 2185 else 2186 dev_kfree_skb(skb); 2187 } 2188 EXPORT_SYMBOL(__dev_kfree_skb_any); 2189 2190 2191 /** 2192 * netif_device_detach - mark device as removed 2193 * @dev: network device 2194 * 2195 * Mark device as removed from system and therefore no longer available. 2196 */ 2197 void netif_device_detach(struct net_device *dev) 2198 { 2199 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 2200 netif_running(dev)) { 2201 netif_tx_stop_all_queues(dev); 2202 } 2203 } 2204 EXPORT_SYMBOL(netif_device_detach); 2205 2206 /** 2207 * netif_device_attach - mark device as attached 2208 * @dev: network device 2209 * 2210 * Mark device as attached from system and restart if needed. 2211 */ 2212 void netif_device_attach(struct net_device *dev) 2213 { 2214 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 2215 netif_running(dev)) { 2216 netif_tx_wake_all_queues(dev); 2217 __netdev_watchdog_up(dev); 2218 } 2219 } 2220 EXPORT_SYMBOL(netif_device_attach); 2221 2222 static void skb_warn_bad_offload(const struct sk_buff *skb) 2223 { 2224 static const netdev_features_t null_features = 0; 2225 struct net_device *dev = skb->dev; 2226 const char *driver = ""; 2227 2228 if (!net_ratelimit()) 2229 return; 2230 2231 if (dev && dev->dev.parent) 2232 driver = dev_driver_string(dev->dev.parent); 2233 2234 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2235 "gso_type=%d ip_summed=%d\n", 2236 driver, dev ? &dev->features : &null_features, 2237 skb->sk ? 
&skb->sk->sk_route_caps : &null_features, 2238 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2239 skb_shinfo(skb)->gso_type, skb->ip_summed); 2240 } 2241 2242 /* 2243 * Invalidate hardware checksum when packet is to be mangled, and 2244 * complete checksum manually on outgoing path. 2245 */ 2246 int skb_checksum_help(struct sk_buff *skb) 2247 { 2248 __wsum csum; 2249 int ret = 0, offset; 2250 2251 if (skb->ip_summed == CHECKSUM_COMPLETE) 2252 goto out_set_summed; 2253 2254 if (unlikely(skb_shinfo(skb)->gso_size)) { 2255 skb_warn_bad_offload(skb); 2256 return -EINVAL; 2257 } 2258 2259 /* Before computing a checksum, we should make sure no frag could 2260 * be modified by an external entity : checksum could be wrong. 2261 */ 2262 if (skb_has_shared_frag(skb)) { 2263 ret = __skb_linearize(skb); 2264 if (ret) 2265 goto out; 2266 } 2267 2268 offset = skb_checksum_start_offset(skb); 2269 BUG_ON(offset >= skb_headlen(skb)); 2270 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2271 2272 offset += skb->csum_offset; 2273 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2274 2275 if (skb_cloned(skb) && 2276 !skb_clone_writable(skb, offset + sizeof(__sum16))) { 2277 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2278 if (ret) 2279 goto out; 2280 } 2281 2282 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2283 out_set_summed: 2284 skb->ip_summed = CHECKSUM_NONE; 2285 out: 2286 return ret; 2287 } 2288 EXPORT_SYMBOL(skb_checksum_help); 2289 2290 __be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2291 { 2292 unsigned int vlan_depth = skb->mac_len; 2293 __be16 type = skb->protocol; 2294 2295 /* Tunnel gso handlers can set protocol to ethernet. */ 2296 if (type == htons(ETH_P_TEB)) { 2297 struct ethhdr *eth; 2298 2299 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 2300 return 0; 2301 2302 eth = (struct ethhdr *)skb_mac_header(skb); 2303 type = eth->h_proto; 2304 } 2305 2306 /* if skb->protocol is 802.1Q/AD then the header should already be 2307 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at 2308 * ETH_HLEN otherwise 2309 */ 2310 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { 2311 if (vlan_depth) { 2312 if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN))) 2313 return 0; 2314 vlan_depth -= VLAN_HLEN; 2315 } else { 2316 vlan_depth = ETH_HLEN; 2317 } 2318 do { 2319 struct vlan_hdr *vh; 2320 2321 if (unlikely(!pskb_may_pull(skb, 2322 vlan_depth + VLAN_HLEN))) 2323 return 0; 2324 2325 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2326 type = vh->h_vlan_encapsulated_proto; 2327 vlan_depth += VLAN_HLEN; 2328 } while (type == htons(ETH_P_8021Q) || 2329 type == htons(ETH_P_8021AD)); 2330 } 2331 2332 *depth = vlan_depth; 2333 2334 return type; 2335 } 2336 2337 /** 2338 * skb_mac_gso_segment - mac layer segmentation handler. 
2339 * @skb: buffer to segment 2340 * @features: features for the output path (see dev->features) 2341 */ 2342 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, 2343 netdev_features_t features) 2344 { 2345 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2346 struct packet_offload *ptype; 2347 int vlan_depth = skb->mac_len; 2348 __be16 type = skb_network_protocol(skb, &vlan_depth); 2349 2350 if (unlikely(!type)) 2351 return ERR_PTR(-EINVAL); 2352 2353 __skb_pull(skb, vlan_depth); 2354 2355 rcu_read_lock(); 2356 list_for_each_entry_rcu(ptype, &offload_base, list) { 2357 if (ptype->type == type && ptype->callbacks.gso_segment) { 2358 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2359 int err; 2360 2361 err = ptype->callbacks.gso_send_check(skb); 2362 segs = ERR_PTR(err); 2363 if (err || skb_gso_ok(skb, features)) 2364 break; 2365 __skb_push(skb, (skb->data - 2366 skb_network_header(skb))); 2367 } 2368 segs = ptype->callbacks.gso_segment(skb, features); 2369 break; 2370 } 2371 } 2372 rcu_read_unlock(); 2373 2374 __skb_push(skb, skb->data - skb_mac_header(skb)); 2375 2376 return segs; 2377 } 2378 EXPORT_SYMBOL(skb_mac_gso_segment); 2379 2380 2381 /* openvswitch calls this on rx path, so we need a different check. 2382 */ 2383 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) 2384 { 2385 if (tx_path) 2386 return skb->ip_summed != CHECKSUM_PARTIAL; 2387 else 2388 return skb->ip_summed == CHECKSUM_NONE; 2389 } 2390 2391 /** 2392 * __skb_gso_segment - Perform segmentation on skb. 2393 * @skb: buffer to segment 2394 * @features: features for the output path (see dev->features) 2395 * @tx_path: whether it is called in TX path 2396 * 2397 * This function segments the given skb and returns a list of segments. 2398 * 2399 * It may return NULL if the skb requires no segmentation. This is 2400 * only possible when GSO is used for verifying header integrity. 2401 */ 2402 struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2403 netdev_features_t features, bool tx_path) 2404 { 2405 if (unlikely(skb_needs_check(skb, tx_path))) { 2406 int err; 2407 2408 skb_warn_bad_offload(skb); 2409 2410 if (skb_header_cloned(skb) && 2411 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 2412 return ERR_PTR(err); 2413 } 2414 2415 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); 2416 SKB_GSO_CB(skb)->encap_level = 0; 2417 2418 skb_reset_mac_header(skb); 2419 skb_reset_mac_len(skb); 2420 2421 return skb_mac_gso_segment(skb, features); 2422 } 2423 EXPORT_SYMBOL(__skb_gso_segment); 2424 2425 /* Take action when hardware reception checksum errors are detected. */ 2426 #ifdef CONFIG_BUG 2427 void netdev_rx_csum_fault(struct net_device *dev) 2428 { 2429 if (net_ratelimit()) { 2430 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); 2431 dump_stack(); 2432 } 2433 } 2434 EXPORT_SYMBOL(netdev_rx_csum_fault); 2435 #endif 2436 2437 /* Actually, we should eliminate this check as soon as we know, that: 2438 * 1. IOMMU is present and allows to map all the memory. 2439 * 2. No high memory really exists on this machine. 
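 *
 * A 64-bit capable driver usually sidesteps the highmem half of this
 * check altogether by advertising high-memory DMA at probe time (sketch,
 * assuming the device's DMA mask has been set up accordingly):
 *
 *	dev->features |= NETIF_F_HIGHDMA;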
2440 */ 2441 2442 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2443 { 2444 #ifdef CONFIG_HIGHMEM 2445 int i; 2446 if (!(dev->features & NETIF_F_HIGHDMA)) { 2447 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2448 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2449 if (PageHighMem(skb_frag_page(frag))) 2450 return 1; 2451 } 2452 } 2453 2454 if (PCI_DMA_BUS_IS_PHYS) { 2455 struct device *pdev = dev->dev.parent; 2456 2457 if (!pdev) 2458 return 0; 2459 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2460 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2461 dma_addr_t addr = page_to_phys(skb_frag_page(frag)); 2462 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) 2463 return 1; 2464 } 2465 } 2466 #endif 2467 return 0; 2468 } 2469 2470 struct dev_gso_cb { 2471 void (*destructor)(struct sk_buff *skb); 2472 }; 2473 2474 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) 2475 2476 static void dev_gso_skb_destructor(struct sk_buff *skb) 2477 { 2478 struct dev_gso_cb *cb; 2479 2480 kfree_skb_list(skb->next); 2481 skb->next = NULL; 2482 2483 cb = DEV_GSO_CB(skb); 2484 if (cb->destructor) 2485 cb->destructor(skb); 2486 } 2487 2488 /** 2489 * dev_gso_segment - Perform emulated hardware segmentation on skb. 2490 * @skb: buffer to segment 2491 * @features: device features as applicable to this skb 2492 * 2493 * This function segments the given skb and stores the list of segments 2494 * in skb->next. 2495 */ 2496 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) 2497 { 2498 struct sk_buff *segs; 2499 2500 segs = skb_gso_segment(skb, features); 2501 2502 /* Verifying header integrity only. */ 2503 if (!segs) 2504 return 0; 2505 2506 if (IS_ERR(segs)) 2507 return PTR_ERR(segs); 2508 2509 skb->next = segs; 2510 DEV_GSO_CB(skb)->destructor = skb->destructor; 2511 skb->destructor = dev_gso_skb_destructor; 2512 2513 return 0; 2514 } 2515 2516 /* If MPLS offload request, verify we are testing hardware MPLS features 2517 * instead of standard features for the netdev. 
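 *
 * A driver able to offload checksums for MPLS-encapsulated frames opts in
 * by filling this separate feature set at probe time (sketch; the exact
 * flag choice is illustrative):
 *
 *	dev->mpls_features = NETIF_F_SG | NETIF_F_GEN_CSUM;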
2518 */ 2519 #ifdef CONFIG_NET_MPLS_GSO 2520 static netdev_features_t net_mpls_features(struct sk_buff *skb, 2521 netdev_features_t features, 2522 __be16 type) 2523 { 2524 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC)) 2525 features &= skb->dev->mpls_features; 2526 2527 return features; 2528 } 2529 #else 2530 static netdev_features_t net_mpls_features(struct sk_buff *skb, 2531 netdev_features_t features, 2532 __be16 type) 2533 { 2534 return features; 2535 } 2536 #endif 2537 2538 static netdev_features_t harmonize_features(struct sk_buff *skb, 2539 netdev_features_t features) 2540 { 2541 int tmp; 2542 __be16 type; 2543 2544 type = skb_network_protocol(skb, &tmp); 2545 features = net_mpls_features(skb, features, type); 2546 2547 if (skb->ip_summed != CHECKSUM_NONE && 2548 !can_checksum_protocol(features, type)) { 2549 features &= ~NETIF_F_ALL_CSUM; 2550 } else if (illegal_highdma(skb->dev, skb)) { 2551 features &= ~NETIF_F_SG; 2552 } 2553 2554 return features; 2555 } 2556 2557 netdev_features_t netif_skb_features(struct sk_buff *skb) 2558 { 2559 __be16 protocol = skb->protocol; 2560 netdev_features_t features = skb->dev->features; 2561 2562 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2563 features &= ~NETIF_F_GSO_MASK; 2564 2565 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2566 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2567 protocol = veh->h_vlan_encapsulated_proto; 2568 } else if (!vlan_tx_tag_present(skb)) { 2569 return harmonize_features(skb, features); 2570 } 2571 2572 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2573 NETIF_F_HW_VLAN_STAG_TX); 2574 2575 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) 2576 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 2577 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2578 NETIF_F_HW_VLAN_STAG_TX; 2579 2580 return harmonize_features(skb, features); 2581 } 2582 EXPORT_SYMBOL(netif_skb_features); 2583 2584 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2585 struct netdev_queue *txq) 2586 { 2587 const struct net_device_ops *ops = dev->netdev_ops; 2588 int rc = NETDEV_TX_OK; 2589 unsigned int skb_len; 2590 2591 if (likely(!skb->next)) { 2592 netdev_features_t features; 2593 2594 /* 2595 * If device doesn't need skb->dst, release it right now while 2596 * its hot in this cpu cache 2597 */ 2598 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2599 skb_dst_drop(skb); 2600 2601 features = netif_skb_features(skb); 2602 2603 if (vlan_tx_tag_present(skb) && 2604 !vlan_hw_offload_capable(features, skb->vlan_proto)) { 2605 skb = __vlan_put_tag(skb, skb->vlan_proto, 2606 vlan_tx_tag_get(skb)); 2607 if (unlikely(!skb)) 2608 goto out; 2609 2610 skb->vlan_tci = 0; 2611 } 2612 2613 /* If encapsulation offload request, verify we are testing 2614 * hardware encapsulation features instead of standard 2615 * features for the netdev 2616 */ 2617 if (skb->encapsulation) 2618 features &= dev->hw_enc_features; 2619 2620 if (netif_needs_gso(skb, features)) { 2621 if (unlikely(dev_gso_segment(skb, features))) 2622 goto out_kfree_skb; 2623 if (skb->next) 2624 goto gso; 2625 } else { 2626 if (skb_needs_linearize(skb, features) && 2627 __skb_linearize(skb)) 2628 goto out_kfree_skb; 2629 2630 /* If packet is not checksummed and device does not 2631 * support checksumming for this protocol, complete 2632 * checksumming here. 
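 *
 * (For reference, a protocol requesting this treatment sets things up
 * roughly the way TCP does:
 *
 *	skb->ip_summed = CHECKSUM_PARTIAL;
 *	skb->csum_start = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct tcphdr, check);
 *
 * which is what tells skb_checksum_help() where to fold the result.)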
2633 */ 2634 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2635 if (skb->encapsulation) 2636 skb_set_inner_transport_header(skb, 2637 skb_checksum_start_offset(skb)); 2638 else 2639 skb_set_transport_header(skb, 2640 skb_checksum_start_offset(skb)); 2641 if (!(features & NETIF_F_ALL_CSUM) && 2642 skb_checksum_help(skb)) 2643 goto out_kfree_skb; 2644 } 2645 } 2646 2647 if (!list_empty(&ptype_all)) 2648 dev_queue_xmit_nit(skb, dev); 2649 2650 skb_len = skb->len; 2651 trace_net_dev_start_xmit(skb, dev); 2652 rc = ops->ndo_start_xmit(skb, dev); 2653 trace_net_dev_xmit(skb, rc, dev, skb_len); 2654 if (rc == NETDEV_TX_OK) 2655 txq_trans_update(txq); 2656 return rc; 2657 } 2658 2659 gso: 2660 do { 2661 struct sk_buff *nskb = skb->next; 2662 2663 skb->next = nskb->next; 2664 nskb->next = NULL; 2665 2666 if (!list_empty(&ptype_all)) 2667 dev_queue_xmit_nit(nskb, dev); 2668 2669 skb_len = nskb->len; 2670 trace_net_dev_start_xmit(nskb, dev); 2671 rc = ops->ndo_start_xmit(nskb, dev); 2672 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2673 if (unlikely(rc != NETDEV_TX_OK)) { 2674 if (rc & ~NETDEV_TX_MASK) 2675 goto out_kfree_gso_skb; 2676 nskb->next = skb->next; 2677 skb->next = nskb; 2678 return rc; 2679 } 2680 txq_trans_update(txq); 2681 if (unlikely(netif_xmit_stopped(txq) && skb->next)) 2682 return NETDEV_TX_BUSY; 2683 } while (skb->next); 2684 2685 out_kfree_gso_skb: 2686 if (likely(skb->next == NULL)) { 2687 skb->destructor = DEV_GSO_CB(skb)->destructor; 2688 consume_skb(skb); 2689 return rc; 2690 } 2691 out_kfree_skb: 2692 kfree_skb(skb); 2693 out: 2694 return rc; 2695 } 2696 EXPORT_SYMBOL_GPL(dev_hard_start_xmit); 2697 2698 static void qdisc_pkt_len_init(struct sk_buff *skb) 2699 { 2700 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2701 2702 qdisc_skb_cb(skb)->pkt_len = skb->len; 2703 2704 /* To get more precise estimation of bytes sent on wire, 2705 * we add to pkt_len the headers size of all segments 2706 */ 2707 if (shinfo->gso_size) { 2708 unsigned int hdr_len; 2709 u16 gso_segs = shinfo->gso_segs; 2710 2711 /* mac layer + network layer */ 2712 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 2713 2714 /* + transport layer */ 2715 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 2716 hdr_len += tcp_hdrlen(skb); 2717 else 2718 hdr_len += sizeof(struct udphdr); 2719 2720 if (shinfo->gso_type & SKB_GSO_DODGY) 2721 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 2722 shinfo->gso_size); 2723 2724 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; 2725 } 2726 } 2727 2728 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, 2729 struct net_device *dev, 2730 struct netdev_queue *txq) 2731 { 2732 spinlock_t *root_lock = qdisc_lock(q); 2733 bool contended; 2734 int rc; 2735 2736 qdisc_pkt_len_init(skb); 2737 qdisc_calculate_pkt_len(skb, q); 2738 /* 2739 * Heuristic to force contended enqueues to serialize on a 2740 * separate lock before trying to get qdisc main lock. 2741 * This permits __QDISC_STATE_RUNNING owner to get the lock more often 2742 * and dequeue packets faster. 
2743 */ 2744 contended = qdisc_is_running(q); 2745 if (unlikely(contended)) 2746 spin_lock(&q->busylock); 2747 2748 spin_lock(root_lock); 2749 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2750 kfree_skb(skb); 2751 rc = NET_XMIT_DROP; 2752 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2753 qdisc_run_begin(q)) { 2754 /* 2755 * This is a work-conserving queue; there are no old skbs 2756 * waiting to be sent out; and the qdisc is not running - 2757 * xmit the skb directly. 2758 */ 2759 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2760 skb_dst_force(skb); 2761 2762 qdisc_bstats_update(q, skb); 2763 2764 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2765 if (unlikely(contended)) { 2766 spin_unlock(&q->busylock); 2767 contended = false; 2768 } 2769 __qdisc_run(q); 2770 } else 2771 qdisc_run_end(q); 2772 2773 rc = NET_XMIT_SUCCESS; 2774 } else { 2775 skb_dst_force(skb); 2776 rc = q->enqueue(skb, q) & NET_XMIT_MASK; 2777 if (qdisc_run_begin(q)) { 2778 if (unlikely(contended)) { 2779 spin_unlock(&q->busylock); 2780 contended = false; 2781 } 2782 __qdisc_run(q); 2783 } 2784 } 2785 spin_unlock(root_lock); 2786 if (unlikely(contended)) 2787 spin_unlock(&q->busylock); 2788 return rc; 2789 } 2790 2791 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 2792 static void skb_update_prio(struct sk_buff *skb) 2793 { 2794 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2795 2796 if (!skb->priority && skb->sk && map) { 2797 unsigned int prioidx = skb->sk->sk_cgrp_prioidx; 2798 2799 if (prioidx < map->priomap_len) 2800 skb->priority = map->priomap[prioidx]; 2801 } 2802 } 2803 #else 2804 #define skb_update_prio(skb) 2805 #endif 2806 2807 static DEFINE_PER_CPU(int, xmit_recursion); 2808 #define RECURSION_LIMIT 10 2809 2810 /** 2811 * dev_loopback_xmit - loop back @skb 2812 * @skb: buffer to transmit 2813 */ 2814 int dev_loopback_xmit(struct sk_buff *skb) 2815 { 2816 skb_reset_mac_header(skb); 2817 __skb_pull(skb, skb_network_offset(skb)); 2818 skb->pkt_type = PACKET_LOOPBACK; 2819 skb->ip_summed = CHECKSUM_UNNECESSARY; 2820 WARN_ON(!skb_dst(skb)); 2821 skb_dst_force(skb); 2822 netif_rx_ni(skb); 2823 return 0; 2824 } 2825 EXPORT_SYMBOL(dev_loopback_xmit); 2826 2827 /** 2828 * __dev_queue_xmit - transmit a buffer 2829 * @skb: buffer to transmit 2830 * @accel_priv: private data used for L2 forwarding offload 2831 * 2832 * Queue a buffer for transmission to a network device. The caller must 2833 * have set the device and priority and built the buffer before calling 2834 * this function. The function can be called from an interrupt. 2835 * 2836 * A negative errno code is returned on a failure. A success does not 2837 * guarantee the frame will be transmitted as it may be dropped due 2838 * to congestion or traffic shaping. 2839 * 2840 * ----------------------------------------------------------------------------------- 2841 * I notice this method can also return errors from the queue disciplines, 2842 * including NET_XMIT_DROP, which is a positive value. So, errors can also 2843 * be positive. 2844 * 2845 * Regardless of the return value, the skb is consumed, so it is currently 2846 * difficult to retry a send to this method. (You can bump the ref count 2847 * before sending to hold a reference for retry if you are careful.) 2848 * 2849 * When calling this method, interrupts MUST be enabled. This is because 2850 * the BH enable code must have IRQs enabled so that it will not deadlock. 
2851  *	--BLG
2852  */
2853 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2854 {
2855 	struct net_device *dev = skb->dev;
2856 	struct netdev_queue *txq;
2857 	struct Qdisc *q;
2858 	int rc = -ENOMEM;
2859 
2860 	skb_reset_mac_header(skb);
2861 
2862 	/* Disable soft irqs for various locks below. Also
2863 	 * stops preemption for RCU.
2864 	 */
2865 	rcu_read_lock_bh();
2866 
2867 	skb_update_prio(skb);
2868 
2869 	txq = netdev_pick_tx(dev, skb, accel_priv);
2870 	q = rcu_dereference_bh(txq->qdisc);
2871 
2872 #ifdef CONFIG_NET_CLS_ACT
2873 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2874 #endif
2875 	trace_net_dev_queue(skb);
2876 	if (q->enqueue) {
2877 		rc = __dev_xmit_skb(skb, q, dev, txq);
2878 		goto out;
2879 	}
2880 
2881 	/* The device has no queue. This is the common case for software
2882 	   devices: loopback, all sorts of tunnels...
2883 
2884 	   Really, it is unlikely that netif_tx_lock protection is necessary
2885 	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
2886 	   counters.)
2887 	   However, it is possible that they rely on the protection
2888 	   we provide here.
2889 
2890 	   Check this and take the lock; it is not prone to deadlocks.
2891 	   Alternatively take the noqueue qdisc path, which is even simpler 8)
2892 	 */
2893 	if (dev->flags & IFF_UP) {
2894 		int cpu = smp_processor_id(); /* ok because BHs are off */
2895 
2896 		if (txq->xmit_lock_owner != cpu) {
2897 
2898 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2899 				goto recursion_alert;
2900 
2901 			HARD_TX_LOCK(dev, txq, cpu);
2902 
2903 			if (!netif_xmit_stopped(txq)) {
2904 				__this_cpu_inc(xmit_recursion);
2905 				rc = dev_hard_start_xmit(skb, dev, txq);
2906 				__this_cpu_dec(xmit_recursion);
2907 				if (dev_xmit_complete(rc)) {
2908 					HARD_TX_UNLOCK(dev, txq);
2909 					goto out;
2910 				}
2911 			}
2912 			HARD_TX_UNLOCK(dev, txq);
2913 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2914 					     dev->name);
2915 		} else {
2916 			/* Recursion is detected! It is possible,
2917 			 * unfortunately
2918 			 */
2919 recursion_alert:
2920 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2921 					     dev->name);
2922 		}
2923 	}
2924 
2925 	rc = -ENETDOWN;
2926 	rcu_read_unlock_bh();
2927 
2928 	atomic_long_inc(&dev->tx_dropped);
2929 	kfree_skb(skb);
2930 	return rc;
2931 out:
2932 	rcu_read_unlock_bh();
2933 	return rc;
2934 }
2935 
2936 int dev_queue_xmit(struct sk_buff *skb)
2937 {
2938 	return __dev_queue_xmit(skb, NULL);
2939 }
2940 EXPORT_SYMBOL(dev_queue_xmit);
2941 
2942 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2943 {
2944 	return __dev_queue_xmit(skb, accel_priv);
2945 }
2946 EXPORT_SYMBOL(dev_queue_xmit_accel);
2947 
2948 
2949 /*=======================================================================
2950 			Receiver routines
2951  =======================================================================*/
2952 
2953 int netdev_max_backlog __read_mostly = 1000;
2954 EXPORT_SYMBOL(netdev_max_backlog);
2955 
2956 int netdev_tstamp_prequeue __read_mostly = 1;
2957 int netdev_budget __read_mostly = 300;
2958 int weight_p __read_mostly = 64;            /* old backlog weight */
2959 
2960 /* Called with irq disabled */
2961 static inline void ____napi_schedule(struct softnet_data *sd,
2962 				     struct napi_struct *napi)
2963 {
2964 	list_add_tail(&napi->poll_list, &sd->poll_list);
2965 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
2966 }
2967 
2968 #ifdef CONFIG_RPS
2969 
2970 /* One global table that all flow-based protocols share.
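 * Protocols feed it from their socket paths so RFS can learn on which
 * CPU the consuming thread runs; for instance the inet receive path
 * does, in essence:
 *
 *	sock_rps_record_flow(sk);
 *
 * on each recvmsg (a sketch of the pattern, not code from this file).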
*/ 2971 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; 2972 EXPORT_SYMBOL(rps_sock_flow_table); 2973 2974 struct static_key rps_needed __read_mostly; 2975 2976 static struct rps_dev_flow * 2977 set_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2978 struct rps_dev_flow *rflow, u16 next_cpu) 2979 { 2980 if (next_cpu != RPS_NO_CPU) { 2981 #ifdef CONFIG_RFS_ACCEL 2982 struct netdev_rx_queue *rxqueue; 2983 struct rps_dev_flow_table *flow_table; 2984 struct rps_dev_flow *old_rflow; 2985 u32 flow_id; 2986 u16 rxq_index; 2987 int rc; 2988 2989 /* Should we steer this flow to a different hardware queue? */ 2990 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap || 2991 !(dev->features & NETIF_F_NTUPLE)) 2992 goto out; 2993 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); 2994 if (rxq_index == skb_get_rx_queue(skb)) 2995 goto out; 2996 2997 rxqueue = dev->_rx + rxq_index; 2998 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2999 if (!flow_table) 3000 goto out; 3001 flow_id = skb_get_hash(skb) & flow_table->mask; 3002 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, 3003 rxq_index, flow_id); 3004 if (rc < 0) 3005 goto out; 3006 old_rflow = rflow; 3007 rflow = &flow_table->flows[flow_id]; 3008 rflow->filter = rc; 3009 if (old_rflow->filter == rflow->filter) 3010 old_rflow->filter = RPS_NO_FILTER; 3011 out: 3012 #endif 3013 rflow->last_qtail = 3014 per_cpu(softnet_data, next_cpu).input_queue_head; 3015 } 3016 3017 rflow->cpu = next_cpu; 3018 return rflow; 3019 } 3020 3021 /* 3022 * get_rps_cpu is called from netif_receive_skb and returns the target 3023 * CPU from the RPS map of the receiving queue for a given skb. 3024 * rcu_read_lock must be held on entry. 3025 */ 3026 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 3027 struct rps_dev_flow **rflowp) 3028 { 3029 struct netdev_rx_queue *rxqueue; 3030 struct rps_map *map; 3031 struct rps_dev_flow_table *flow_table; 3032 struct rps_sock_flow_table *sock_flow_table; 3033 int cpu = -1; 3034 u16 tcpu; 3035 u32 hash; 3036 3037 if (skb_rx_queue_recorded(skb)) { 3038 u16 index = skb_get_rx_queue(skb); 3039 if (unlikely(index >= dev->real_num_rx_queues)) { 3040 WARN_ONCE(dev->real_num_rx_queues > 1, 3041 "%s received packet on queue %u, but number " 3042 "of RX queues is %u\n", 3043 dev->name, index, dev->real_num_rx_queues); 3044 goto done; 3045 } 3046 rxqueue = dev->_rx + index; 3047 } else 3048 rxqueue = dev->_rx; 3049 3050 map = rcu_dereference(rxqueue->rps_map); 3051 if (map) { 3052 if (map->len == 1 && 3053 !rcu_access_pointer(rxqueue->rps_flow_table)) { 3054 tcpu = map->cpus[0]; 3055 if (cpu_online(tcpu)) 3056 cpu = tcpu; 3057 goto done; 3058 } 3059 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) { 3060 goto done; 3061 } 3062 3063 skb_reset_network_header(skb); 3064 hash = skb_get_hash(skb); 3065 if (!hash) 3066 goto done; 3067 3068 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3069 sock_flow_table = rcu_dereference(rps_sock_flow_table); 3070 if (flow_table && sock_flow_table) { 3071 u16 next_cpu; 3072 struct rps_dev_flow *rflow; 3073 3074 rflow = &flow_table->flows[hash & flow_table->mask]; 3075 tcpu = rflow->cpu; 3076 3077 next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask]; 3078 3079 /* 3080 * If the desired CPU (where last recvmsg was done) is 3081 * different from current CPU (one in the rx-queue flow 3082 * table entry), switch if one of the following holds: 3083 * - Current CPU is unset (equal to RPS_NO_CPU). 3084 * - Current CPU is offline. 
3085 * - The current CPU's queue tail has advanced beyond the 3086 * last packet that was enqueued using this table entry. 3087 * This guarantees that all previous packets for the flow 3088 * have been dequeued, thus preserving in order delivery. 3089 */ 3090 if (unlikely(tcpu != next_cpu) && 3091 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 3092 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 3093 rflow->last_qtail)) >= 0)) { 3094 tcpu = next_cpu; 3095 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 3096 } 3097 3098 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 3099 *rflowp = rflow; 3100 cpu = tcpu; 3101 goto done; 3102 } 3103 } 3104 3105 if (map) { 3106 tcpu = map->cpus[((u64) hash * map->len) >> 32]; 3107 3108 if (cpu_online(tcpu)) { 3109 cpu = tcpu; 3110 goto done; 3111 } 3112 } 3113 3114 done: 3115 return cpu; 3116 } 3117 3118 #ifdef CONFIG_RFS_ACCEL 3119 3120 /** 3121 * rps_may_expire_flow - check whether an RFS hardware filter may be removed 3122 * @dev: Device on which the filter was set 3123 * @rxq_index: RX queue index 3124 * @flow_id: Flow ID passed to ndo_rx_flow_steer() 3125 * @filter_id: Filter ID returned by ndo_rx_flow_steer() 3126 * 3127 * Drivers that implement ndo_rx_flow_steer() should periodically call 3128 * this function for each installed filter and remove the filters for 3129 * which it returns %true. 3130 */ 3131 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 3132 u32 flow_id, u16 filter_id) 3133 { 3134 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; 3135 struct rps_dev_flow_table *flow_table; 3136 struct rps_dev_flow *rflow; 3137 bool expire = true; 3138 int cpu; 3139 3140 rcu_read_lock(); 3141 flow_table = rcu_dereference(rxqueue->rps_flow_table); 3142 if (flow_table && flow_id <= flow_table->mask) { 3143 rflow = &flow_table->flows[flow_id]; 3144 cpu = ACCESS_ONCE(rflow->cpu); 3145 if (rflow->filter == filter_id && cpu != RPS_NO_CPU && 3146 ((int)(per_cpu(softnet_data, cpu).input_queue_head - 3147 rflow->last_qtail) < 3148 (int)(10 * flow_table->mask))) 3149 expire = false; 3150 } 3151 rcu_read_unlock(); 3152 return expire; 3153 } 3154 EXPORT_SYMBOL(rps_may_expire_flow); 3155 3156 #endif /* CONFIG_RFS_ACCEL */ 3157 3158 /* Called from hardirq (IPI) context */ 3159 static void rps_trigger_softirq(void *data) 3160 { 3161 struct softnet_data *sd = data; 3162 3163 ____napi_schedule(sd, &sd->backlog); 3164 sd->received_rps++; 3165 } 3166 3167 #endif /* CONFIG_RPS */ 3168 3169 /* 3170 * Check if this softnet_data structure is another cpu one 3171 * If yes, queue it to our IPI list and return 1 3172 * If no, return 0 3173 */ 3174 static int rps_ipi_queued(struct softnet_data *sd) 3175 { 3176 #ifdef CONFIG_RPS 3177 struct softnet_data *mysd = &__get_cpu_var(softnet_data); 3178 3179 if (sd != mysd) { 3180 sd->rps_ipi_next = mysd->rps_ipi_list; 3181 mysd->rps_ipi_list = sd; 3182 3183 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 3184 return 1; 3185 } 3186 #endif /* CONFIG_RPS */ 3187 return 0; 3188 } 3189 3190 #ifdef CONFIG_NET_FLOW_LIMIT 3191 int netdev_flow_limit_table_len __read_mostly = (1 << 12); 3192 #endif 3193 3194 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) 3195 { 3196 #ifdef CONFIG_NET_FLOW_LIMIT 3197 struct sd_flow_limit *fl; 3198 struct softnet_data *sd; 3199 unsigned int old_flow, new_flow; 3200 3201 if (qlen < (netdev_max_backlog >> 1)) 3202 return false; 3203 3204 sd = &__get_cpu_var(softnet_data); 3205 3206 rcu_read_lock(); 3207 fl = rcu_dereference(sd->flow_limit); 3208 if (fl) { 3209 new_flow = 
skb_get_hash(skb) & (fl->num_buckets - 1); 3210 old_flow = fl->history[fl->history_head]; 3211 fl->history[fl->history_head] = new_flow; 3212 3213 fl->history_head++; 3214 fl->history_head &= FLOW_LIMIT_HISTORY - 1; 3215 3216 if (likely(fl->buckets[old_flow])) 3217 fl->buckets[old_flow]--; 3218 3219 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { 3220 fl->count++; 3221 rcu_read_unlock(); 3222 return true; 3223 } 3224 } 3225 rcu_read_unlock(); 3226 #endif 3227 return false; 3228 } 3229 3230 /* 3231 * enqueue_to_backlog is called to queue an skb to a per CPU backlog 3232 * queue (may be a remote CPU queue). 3233 */ 3234 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, 3235 unsigned int *qtail) 3236 { 3237 struct softnet_data *sd; 3238 unsigned long flags; 3239 unsigned int qlen; 3240 3241 sd = &per_cpu(softnet_data, cpu); 3242 3243 local_irq_save(flags); 3244 3245 rps_lock(sd); 3246 qlen = skb_queue_len(&sd->input_pkt_queue); 3247 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 3248 if (skb_queue_len(&sd->input_pkt_queue)) { 3249 enqueue: 3250 __skb_queue_tail(&sd->input_pkt_queue, skb); 3251 input_queue_tail_incr_save(sd, qtail); 3252 rps_unlock(sd); 3253 local_irq_restore(flags); 3254 return NET_RX_SUCCESS; 3255 } 3256 3257 /* Schedule NAPI for backlog device 3258 * We can use non atomic operation since we own the queue lock 3259 */ 3260 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { 3261 if (!rps_ipi_queued(sd)) 3262 ____napi_schedule(sd, &sd->backlog); 3263 } 3264 goto enqueue; 3265 } 3266 3267 sd->dropped++; 3268 rps_unlock(sd); 3269 3270 local_irq_restore(flags); 3271 3272 atomic_long_inc(&skb->dev->rx_dropped); 3273 kfree_skb(skb); 3274 return NET_RX_DROP; 3275 } 3276 3277 static int netif_rx_internal(struct sk_buff *skb) 3278 { 3279 int ret; 3280 3281 net_timestamp_check(netdev_tstamp_prequeue, skb); 3282 3283 trace_netif_rx(skb); 3284 #ifdef CONFIG_RPS 3285 if (static_key_false(&rps_needed)) { 3286 struct rps_dev_flow voidflow, *rflow = &voidflow; 3287 int cpu; 3288 3289 preempt_disable(); 3290 rcu_read_lock(); 3291 3292 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3293 if (cpu < 0) 3294 cpu = smp_processor_id(); 3295 3296 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3297 3298 rcu_read_unlock(); 3299 preempt_enable(); 3300 } else 3301 #endif 3302 { 3303 unsigned int qtail; 3304 ret = enqueue_to_backlog(skb, get_cpu(), &qtail); 3305 put_cpu(); 3306 } 3307 return ret; 3308 } 3309 3310 /** 3311 * netif_rx - post buffer to the network code 3312 * @skb: buffer to post 3313 * 3314 * This function receives a packet from a device driver and queues it for 3315 * the upper (protocol) levels to process. It always succeeds. The buffer 3316 * may be dropped during processing for congestion control or by the 3317 * protocol layers. 
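 *
 * A typical caller is a non-NAPI driver's receive interrupt handler,
 * whose tail looks roughly like this (an illustrative sketch, not a
 * complete handler):
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);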
3318  *
3319  * Return values:
3320  *	NET_RX_SUCCESS	(no congestion)
3321  *	NET_RX_DROP	(packet was dropped)
3322  *
3323  */
3324 
3325 int netif_rx(struct sk_buff *skb)
3326 {
3327 	trace_netif_rx_entry(skb);
3328 
3329 	return netif_rx_internal(skb);
3330 }
3331 EXPORT_SYMBOL(netif_rx);
3332 
3333 int netif_rx_ni(struct sk_buff *skb)
3334 {
3335 	int err;
3336 
3337 	trace_netif_rx_ni_entry(skb);
3338 
3339 	preempt_disable();
3340 	err = netif_rx_internal(skb);
3341 	if (local_softirq_pending())
3342 		do_softirq();
3343 	preempt_enable();
3344 
3345 	return err;
3346 }
3347 EXPORT_SYMBOL(netif_rx_ni);
3348 
3349 static void net_tx_action(struct softirq_action *h)
3350 {
3351 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
3352 
3353 	if (sd->completion_queue) {
3354 		struct sk_buff *clist;
3355 
3356 		local_irq_disable();
3357 		clist = sd->completion_queue;
3358 		sd->completion_queue = NULL;
3359 		local_irq_enable();
3360 
3361 		while (clist) {
3362 			struct sk_buff *skb = clist;
3363 			clist = clist->next;
3364 
3365 			WARN_ON(atomic_read(&skb->users));
3366 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3367 				trace_consume_skb(skb);
3368 			else
3369 				trace_kfree_skb(skb, net_tx_action);
3370 			__kfree_skb(skb);
3371 		}
3372 	}
3373 
3374 	if (sd->output_queue) {
3375 		struct Qdisc *head;
3376 
3377 		local_irq_disable();
3378 		head = sd->output_queue;
3379 		sd->output_queue = NULL;
3380 		sd->output_queue_tailp = &sd->output_queue;
3381 		local_irq_enable();
3382 
3383 		while (head) {
3384 			struct Qdisc *q = head;
3385 			spinlock_t *root_lock;
3386 
3387 			head = head->next_sched;
3388 
3389 			root_lock = qdisc_lock(q);
3390 			if (spin_trylock(root_lock)) {
3391 				smp_mb__before_atomic();
3392 				clear_bit(__QDISC_STATE_SCHED,
3393 					  &q->state);
3394 				qdisc_run(q);
3395 				spin_unlock(root_lock);
3396 			} else {
3397 				if (!test_bit(__QDISC_STATE_DEACTIVATED,
3398 					      &q->state)) {
3399 					__netif_reschedule(q);
3400 				} else {
3401 					smp_mb__before_atomic();
3402 					clear_bit(__QDISC_STATE_SCHED,
3403 						  &q->state);
3404 				}
3405 			}
3406 		}
3407 	}
3408 }
3409 
3410 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3411     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3412 /* This hook is defined here for ATM LANE */
3413 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3414 			     unsigned char *addr) __read_mostly;
3415 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3416 #endif
3417 
3418 #ifdef CONFIG_NET_CLS_ACT
3419 /* TODO: Maybe we should just force sch_ingress to be compiled in
3420  * whenever CONFIG_NET_CLS_ACT is? Otherwise we currently pay for a few
3421  * useless instructions (a compare and two extra stores) when we have
3422  * CONFIG_NET_CLS_ACT on but the ingress scheduler is not built.
3423  * NOTE: this doesn't remove any functionality; if you don't have
3424  * the ingress scheduler, you just can't add policies on ingress.
3425 * 3426 */ 3427 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 3428 { 3429 struct net_device *dev = skb->dev; 3430 u32 ttl = G_TC_RTTL(skb->tc_verd); 3431 int result = TC_ACT_OK; 3432 struct Qdisc *q; 3433 3434 if (unlikely(MAX_RED_LOOP < ttl++)) { 3435 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n", 3436 skb->skb_iif, dev->ifindex); 3437 return TC_ACT_SHOT; 3438 } 3439 3440 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 3441 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 3442 3443 q = rxq->qdisc; 3444 if (q != &noop_qdisc) { 3445 spin_lock(qdisc_lock(q)); 3446 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) 3447 result = qdisc_enqueue_root(skb, q); 3448 spin_unlock(qdisc_lock(q)); 3449 } 3450 3451 return result; 3452 } 3453 3454 static inline struct sk_buff *handle_ing(struct sk_buff *skb, 3455 struct packet_type **pt_prev, 3456 int *ret, struct net_device *orig_dev) 3457 { 3458 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); 3459 3460 if (!rxq || rxq->qdisc == &noop_qdisc) 3461 goto out; 3462 3463 if (*pt_prev) { 3464 *ret = deliver_skb(skb, *pt_prev, orig_dev); 3465 *pt_prev = NULL; 3466 } 3467 3468 switch (ing_filter(skb, rxq)) { 3469 case TC_ACT_SHOT: 3470 case TC_ACT_STOLEN: 3471 kfree_skb(skb); 3472 return NULL; 3473 } 3474 3475 out: 3476 skb->tc_verd = 0; 3477 return skb; 3478 } 3479 #endif 3480 3481 /** 3482 * netdev_rx_handler_register - register receive handler 3483 * @dev: device to register a handler for 3484 * @rx_handler: receive handler to register 3485 * @rx_handler_data: data pointer that is used by rx handler 3486 * 3487 * Register a receive handler for a device. This handler will then be 3488 * called from __netif_receive_skb. A negative errno code is returned 3489 * on a failure. 3490 * 3491 * The caller must hold the rtnl_mutex. 3492 * 3493 * For a general description of rx_handler, see enum rx_handler_result. 3494 */ 3495 int netdev_rx_handler_register(struct net_device *dev, 3496 rx_handler_func_t *rx_handler, 3497 void *rx_handler_data) 3498 { 3499 ASSERT_RTNL(); 3500 3501 if (dev->rx_handler) 3502 return -EBUSY; 3503 3504 /* Note: rx_handler_data must be set before rx_handler */ 3505 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3506 rcu_assign_pointer(dev->rx_handler, rx_handler); 3507 3508 return 0; 3509 } 3510 EXPORT_SYMBOL_GPL(netdev_rx_handler_register); 3511 3512 /** 3513 * netdev_rx_handler_unregister - unregister receive handler 3514 * @dev: device to unregister a handler from 3515 * 3516 * Unregister a receive handler from a device. 3517 * 3518 * The caller must hold the rtnl_mutex. 3519 */ 3520 void netdev_rx_handler_unregister(struct net_device *dev) 3521 { 3522 3523 ASSERT_RTNL(); 3524 RCU_INIT_POINTER(dev->rx_handler, NULL); 3525 /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 3526 * section has a guarantee to see a non NULL rx_handler_data 3527 * as well. 3528 */ 3529 synchronize_net(); 3530 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3531 } 3532 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3533 3534 /* 3535 * Limit the use of PFMEMALLOC reserves to those protocols that implement 3536 * the special handling of PFMEMALLOC skbs. 
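 *
 * A socket opts in to those reserves via sk_set_memalloc(), as used for
 * things like swap over network storage (sketch):
 *
 *	sk_set_memalloc(sk);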
3537 */ 3538 static bool skb_pfmemalloc_protocol(struct sk_buff *skb) 3539 { 3540 switch (skb->protocol) { 3541 case htons(ETH_P_ARP): 3542 case htons(ETH_P_IP): 3543 case htons(ETH_P_IPV6): 3544 case htons(ETH_P_8021Q): 3545 case htons(ETH_P_8021AD): 3546 return true; 3547 default: 3548 return false; 3549 } 3550 } 3551 3552 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) 3553 { 3554 struct packet_type *ptype, *pt_prev; 3555 rx_handler_func_t *rx_handler; 3556 struct net_device *orig_dev; 3557 struct net_device *null_or_dev; 3558 bool deliver_exact = false; 3559 int ret = NET_RX_DROP; 3560 __be16 type; 3561 3562 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3563 3564 trace_netif_receive_skb(skb); 3565 3566 orig_dev = skb->dev; 3567 3568 skb_reset_network_header(skb); 3569 if (!skb_transport_header_was_set(skb)) 3570 skb_reset_transport_header(skb); 3571 skb_reset_mac_len(skb); 3572 3573 pt_prev = NULL; 3574 3575 rcu_read_lock(); 3576 3577 another_round: 3578 skb->skb_iif = skb->dev->ifindex; 3579 3580 __this_cpu_inc(softnet_data.processed); 3581 3582 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 3583 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 3584 skb = vlan_untag(skb); 3585 if (unlikely(!skb)) 3586 goto unlock; 3587 } 3588 3589 #ifdef CONFIG_NET_CLS_ACT 3590 if (skb->tc_verd & TC_NCLS) { 3591 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3592 goto ncls; 3593 } 3594 #endif 3595 3596 if (pfmemalloc) 3597 goto skip_taps; 3598 3599 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3600 if (!ptype->dev || ptype->dev == skb->dev) { 3601 if (pt_prev) 3602 ret = deliver_skb(skb, pt_prev, orig_dev); 3603 pt_prev = ptype; 3604 } 3605 } 3606 3607 skip_taps: 3608 #ifdef CONFIG_NET_CLS_ACT 3609 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3610 if (!skb) 3611 goto unlock; 3612 ncls: 3613 #endif 3614 3615 if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) 3616 goto drop; 3617 3618 if (vlan_tx_tag_present(skb)) { 3619 if (pt_prev) { 3620 ret = deliver_skb(skb, pt_prev, orig_dev); 3621 pt_prev = NULL; 3622 } 3623 if (vlan_do_receive(&skb)) 3624 goto another_round; 3625 else if (unlikely(!skb)) 3626 goto unlock; 3627 } 3628 3629 rx_handler = rcu_dereference(skb->dev->rx_handler); 3630 if (rx_handler) { 3631 if (pt_prev) { 3632 ret = deliver_skb(skb, pt_prev, orig_dev); 3633 pt_prev = NULL; 3634 } 3635 switch (rx_handler(&skb)) { 3636 case RX_HANDLER_CONSUMED: 3637 ret = NET_RX_SUCCESS; 3638 goto unlock; 3639 case RX_HANDLER_ANOTHER: 3640 goto another_round; 3641 case RX_HANDLER_EXACT: 3642 deliver_exact = true; 3643 case RX_HANDLER_PASS: 3644 break; 3645 default: 3646 BUG(); 3647 } 3648 } 3649 3650 if (unlikely(vlan_tx_tag_present(skb))) { 3651 if (vlan_tx_tag_get_id(skb)) 3652 skb->pkt_type = PACKET_OTHERHOST; 3653 /* Note: we might in the future use prio bits 3654 * and set skb->priority like in vlan_do_receive() 3655 * For the time being, just ignore Priority Code Point 3656 */ 3657 skb->vlan_tci = 0; 3658 } 3659 3660 /* deliver only exact match when indicated */ 3661 null_or_dev = deliver_exact ? 
skb->dev : NULL; 3662 3663 type = skb->protocol; 3664 list_for_each_entry_rcu(ptype, 3665 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 3666 if (ptype->type == type && 3667 (ptype->dev == null_or_dev || ptype->dev == skb->dev || 3668 ptype->dev == orig_dev)) { 3669 if (pt_prev) 3670 ret = deliver_skb(skb, pt_prev, orig_dev); 3671 pt_prev = ptype; 3672 } 3673 } 3674 3675 if (pt_prev) { 3676 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 3677 goto drop; 3678 else 3679 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3680 } else { 3681 drop: 3682 atomic_long_inc(&skb->dev->rx_dropped); 3683 kfree_skb(skb); 3684 /* Jamal, now you will not able to escape explaining 3685 * me how you were going to use this. :-) 3686 */ 3687 ret = NET_RX_DROP; 3688 } 3689 3690 unlock: 3691 rcu_read_unlock(); 3692 return ret; 3693 } 3694 3695 static int __netif_receive_skb(struct sk_buff *skb) 3696 { 3697 int ret; 3698 3699 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) { 3700 unsigned long pflags = current->flags; 3701 3702 /* 3703 * PFMEMALLOC skbs are special, they should 3704 * - be delivered to SOCK_MEMALLOC sockets only 3705 * - stay away from userspace 3706 * - have bounded memory usage 3707 * 3708 * Use PF_MEMALLOC as this saves us from propagating the allocation 3709 * context down to all allocation sites. 3710 */ 3711 current->flags |= PF_MEMALLOC; 3712 ret = __netif_receive_skb_core(skb, true); 3713 tsk_restore_flags(current, pflags, PF_MEMALLOC); 3714 } else 3715 ret = __netif_receive_skb_core(skb, false); 3716 3717 return ret; 3718 } 3719 3720 static int netif_receive_skb_internal(struct sk_buff *skb) 3721 { 3722 net_timestamp_check(netdev_tstamp_prequeue, skb); 3723 3724 if (skb_defer_rx_timestamp(skb)) 3725 return NET_RX_SUCCESS; 3726 3727 #ifdef CONFIG_RPS 3728 if (static_key_false(&rps_needed)) { 3729 struct rps_dev_flow voidflow, *rflow = &voidflow; 3730 int cpu, ret; 3731 3732 rcu_read_lock(); 3733 3734 cpu = get_rps_cpu(skb->dev, skb, &rflow); 3735 3736 if (cpu >= 0) { 3737 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3738 rcu_read_unlock(); 3739 return ret; 3740 } 3741 rcu_read_unlock(); 3742 } 3743 #endif 3744 return __netif_receive_skb(skb); 3745 } 3746 3747 /** 3748 * netif_receive_skb - process receive buffer from network 3749 * @skb: buffer to process 3750 * 3751 * netif_receive_skb() is the main receive data processing function. 3752 * It always succeeds. The buffer may be dropped during processing 3753 * for congestion control or by the protocol layers. 3754 * 3755 * This function may only be called from softirq context and interrupts 3756 * should be enabled. 3757 * 3758 * Return values (usually ignored): 3759 * NET_RX_SUCCESS: no congestion 3760 * NET_RX_DROP: packet was dropped 3761 */ 3762 int netif_receive_skb(struct sk_buff *skb) 3763 { 3764 trace_netif_receive_skb_entry(skb); 3765 3766 return netif_receive_skb_internal(skb); 3767 } 3768 EXPORT_SYMBOL(netif_receive_skb); 3769 3770 /* Network device is going away, flush any packets still pending 3771 * Called with irqs disabled. 
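 *
 * The unregister path runs it on every CPU, roughly as:
 *
 *	on_each_cpu(flush_backlog, dev, true);
 *
 * so that no per-CPU queue still references the outgoing device.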
3772 */ 3773 static void flush_backlog(void *arg) 3774 { 3775 struct net_device *dev = arg; 3776 struct softnet_data *sd = &__get_cpu_var(softnet_data); 3777 struct sk_buff *skb, *tmp; 3778 3779 rps_lock(sd); 3780 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { 3781 if (skb->dev == dev) { 3782 __skb_unlink(skb, &sd->input_pkt_queue); 3783 kfree_skb(skb); 3784 input_queue_head_incr(sd); 3785 } 3786 } 3787 rps_unlock(sd); 3788 3789 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { 3790 if (skb->dev == dev) { 3791 __skb_unlink(skb, &sd->process_queue); 3792 kfree_skb(skb); 3793 input_queue_head_incr(sd); 3794 } 3795 } 3796 } 3797 3798 static int napi_gro_complete(struct sk_buff *skb) 3799 { 3800 struct packet_offload *ptype; 3801 __be16 type = skb->protocol; 3802 struct list_head *head = &offload_base; 3803 int err = -ENOENT; 3804 3805 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); 3806 3807 if (NAPI_GRO_CB(skb)->count == 1) { 3808 skb_shinfo(skb)->gso_size = 0; 3809 goto out; 3810 } 3811 3812 rcu_read_lock(); 3813 list_for_each_entry_rcu(ptype, head, list) { 3814 if (ptype->type != type || !ptype->callbacks.gro_complete) 3815 continue; 3816 3817 err = ptype->callbacks.gro_complete(skb, 0); 3818 break; 3819 } 3820 rcu_read_unlock(); 3821 3822 if (err) { 3823 WARN_ON(&ptype->list == head); 3824 kfree_skb(skb); 3825 return NET_RX_SUCCESS; 3826 } 3827 3828 out: 3829 return netif_receive_skb_internal(skb); 3830 } 3831 3832 /* napi->gro_list contains packets ordered by age. 3833 * youngest packets at the head of it. 3834 * Complete skbs in reverse order to reduce latencies. 3835 */ 3836 void napi_gro_flush(struct napi_struct *napi, bool flush_old) 3837 { 3838 struct sk_buff *skb, *prev = NULL; 3839 3840 /* scan list and build reverse chain */ 3841 for (skb = napi->gro_list; skb != NULL; skb = skb->next) { 3842 skb->prev = prev; 3843 prev = skb; 3844 } 3845 3846 for (skb = prev; skb; skb = prev) { 3847 skb->next = NULL; 3848 3849 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 3850 return; 3851 3852 prev = skb->prev; 3853 napi_gro_complete(skb); 3854 napi->gro_count--; 3855 } 3856 3857 napi->gro_list = NULL; 3858 } 3859 EXPORT_SYMBOL(napi_gro_flush); 3860 3861 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) 3862 { 3863 struct sk_buff *p; 3864 unsigned int maclen = skb->dev->hard_header_len; 3865 u32 hash = skb_get_hash_raw(skb); 3866 3867 for (p = napi->gro_list; p; p = p->next) { 3868 unsigned long diffs; 3869 3870 NAPI_GRO_CB(p)->flush = 0; 3871 3872 if (hash != skb_get_hash_raw(p)) { 3873 NAPI_GRO_CB(p)->same_flow = 0; 3874 continue; 3875 } 3876 3877 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; 3878 diffs |= p->vlan_tci ^ skb->vlan_tci; 3879 if (maclen == ETH_HLEN) 3880 diffs |= compare_ether_header(skb_mac_header(p), 3881 skb_mac_header(skb)); 3882 else if (!diffs) 3883 diffs = memcmp(skb_mac_header(p), 3884 skb_mac_header(skb), 3885 maclen); 3886 NAPI_GRO_CB(p)->same_flow = !diffs; 3887 } 3888 } 3889 3890 static void skb_gro_reset_offset(struct sk_buff *skb) 3891 { 3892 const struct skb_shared_info *pinfo = skb_shinfo(skb); 3893 const skb_frag_t *frag0 = &pinfo->frags[0]; 3894 3895 NAPI_GRO_CB(skb)->data_offset = 0; 3896 NAPI_GRO_CB(skb)->frag0 = NULL; 3897 NAPI_GRO_CB(skb)->frag0_len = 0; 3898 3899 if (skb_mac_header(skb) == skb_tail_pointer(skb) && 3900 pinfo->nr_frags && 3901 !PageHighMem(skb_frag_page(frag0))) { 3902 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 3903 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); 
3904 } 3905 } 3906 3907 static void gro_pull_from_frag0(struct sk_buff *skb, int grow) 3908 { 3909 struct skb_shared_info *pinfo = skb_shinfo(skb); 3910 3911 BUG_ON(skb->end - skb->tail < grow); 3912 3913 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); 3914 3915 skb->data_len -= grow; 3916 skb->tail += grow; 3917 3918 pinfo->frags[0].page_offset += grow; 3919 skb_frag_size_sub(&pinfo->frags[0], grow); 3920 3921 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) { 3922 skb_frag_unref(skb, 0); 3923 memmove(pinfo->frags, pinfo->frags + 1, 3924 --pinfo->nr_frags * sizeof(pinfo->frags[0])); 3925 } 3926 } 3927 3928 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3929 { 3930 struct sk_buff **pp = NULL; 3931 struct packet_offload *ptype; 3932 __be16 type = skb->protocol; 3933 struct list_head *head = &offload_base; 3934 int same_flow; 3935 enum gro_result ret; 3936 int grow; 3937 3938 if (!(skb->dev->features & NETIF_F_GRO)) 3939 goto normal; 3940 3941 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3942 goto normal; 3943 3944 gro_list_prepare(napi, skb); 3945 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */ 3946 3947 rcu_read_lock(); 3948 list_for_each_entry_rcu(ptype, head, list) { 3949 if (ptype->type != type || !ptype->callbacks.gro_receive) 3950 continue; 3951 3952 skb_set_network_header(skb, skb_gro_offset(skb)); 3953 skb_reset_mac_len(skb); 3954 NAPI_GRO_CB(skb)->same_flow = 0; 3955 NAPI_GRO_CB(skb)->flush = 0; 3956 NAPI_GRO_CB(skb)->free = 0; 3957 NAPI_GRO_CB(skb)->udp_mark = 0; 3958 3959 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); 3960 break; 3961 } 3962 rcu_read_unlock(); 3963 3964 if (&ptype->list == head) 3965 goto normal; 3966 3967 same_flow = NAPI_GRO_CB(skb)->same_flow; 3968 ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; 3969 3970 if (pp) { 3971 struct sk_buff *nskb = *pp; 3972 3973 *pp = nskb->next; 3974 nskb->next = NULL; 3975 napi_gro_complete(nskb); 3976 napi->gro_count--; 3977 } 3978 3979 if (same_flow) 3980 goto ok; 3981 3982 if (NAPI_GRO_CB(skb)->flush) 3983 goto normal; 3984 3985 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) { 3986 struct sk_buff *nskb = napi->gro_list; 3987 3988 /* locate the end of the list to select the 'oldest' flow */ 3989 while (nskb->next) { 3990 pp = &nskb->next; 3991 nskb = *pp; 3992 } 3993 *pp = NULL; 3994 nskb->next = NULL; 3995 napi_gro_complete(nskb); 3996 } else { 3997 napi->gro_count++; 3998 } 3999 NAPI_GRO_CB(skb)->count = 1; 4000 NAPI_GRO_CB(skb)->age = jiffies; 4001 NAPI_GRO_CB(skb)->last = skb; 4002 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 4003 skb->next = napi->gro_list; 4004 napi->gro_list = skb; 4005 ret = GRO_HELD; 4006 4007 pull: 4008 grow = skb_gro_offset(skb) - skb_headlen(skb); 4009 if (grow > 0) 4010 gro_pull_from_frag0(skb, grow); 4011 ok: 4012 return ret; 4013 4014 normal: 4015 ret = GRO_NORMAL; 4016 goto pull; 4017 } 4018 4019 struct packet_offload *gro_find_receive_by_type(__be16 type) 4020 { 4021 struct list_head *offload_head = &offload_base; 4022 struct packet_offload *ptype; 4023 4024 list_for_each_entry_rcu(ptype, offload_head, list) { 4025 if (ptype->type != type || !ptype->callbacks.gro_receive) 4026 continue; 4027 return ptype; 4028 } 4029 return NULL; 4030 } 4031 EXPORT_SYMBOL(gro_find_receive_by_type); 4032 4033 struct packet_offload *gro_find_complete_by_type(__be16 type) 4034 { 4035 struct list_head *offload_head = &offload_base; 4036 struct packet_offload *ptype; 4037 4038 list_for_each_entry_rcu(ptype, offload_head, list) { 4039 if (ptype->type != type || !ptype->callbacks.gro_complete) 4040 continue; 4041 return ptype; 4042 } 4043 return NULL; 4044 } 4045 EXPORT_SYMBOL(gro_find_complete_by_type); 4046 4047 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 4048 { 4049 switch (ret) { 4050 case GRO_NORMAL: 4051 if (netif_receive_skb_internal(skb)) 4052 ret = GRO_DROP; 4053 break; 4054 4055 case GRO_DROP: 4056 kfree_skb(skb); 4057 break; 4058 4059 case GRO_MERGED_FREE: 4060 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 4061 kmem_cache_free(skbuff_head_cache, skb); 4062 else 4063 __kfree_skb(skb); 4064 break; 4065 4066 case GRO_HELD: 4067 case GRO_MERGED: 4068 break; 4069 } 4070 4071 return ret; 4072 } 4073 4074 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 4075 { 4076 trace_napi_gro_receive_entry(skb); 4077 4078 skb_gro_reset_offset(skb); 4079 4080 return napi_skb_finish(dev_gro_receive(napi, skb), skb); 4081 } 4082 EXPORT_SYMBOL(napi_gro_receive); 4083 4084 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 4085 { 4086 __skb_pull(skb, skb_headlen(skb)); 4087 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 4088 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 4089 skb->vlan_tci = 0; 4090 skb->dev = napi->dev; 4091 skb->skb_iif = 0; 4092 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 4093 4094 napi->skb = skb; 4095 } 4096 4097 struct sk_buff *napi_get_frags(struct napi_struct *napi) 4098 { 4099 struct sk_buff *skb = napi->skb; 4100 4101 if (!skb) { 4102 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); 4103 napi->skb = skb; 4104 } 4105 return skb; 4106 } 4107 EXPORT_SYMBOL(napi_get_frags); 4108 4109 static gro_result_t napi_frags_finish(struct napi_struct *napi, 4110 
struct sk_buff *skb, 4111 gro_result_t ret) 4112 { 4113 switch (ret) { 4114 case GRO_NORMAL: 4115 case GRO_HELD: 4116 __skb_push(skb, ETH_HLEN); 4117 skb->protocol = eth_type_trans(skb, skb->dev); 4118 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb)) 4119 ret = GRO_DROP; 4120 break; 4121 4122 case GRO_DROP: 4123 case GRO_MERGED_FREE: 4124 napi_reuse_skb(napi, skb); 4125 break; 4126 4127 case GRO_MERGED: 4128 break; 4129 } 4130 4131 return ret; 4132 } 4133 4134 /* Upper GRO stack assumes network header starts at gro_offset=0 4135 * Drivers could call both napi_gro_frags() and napi_gro_receive() 4136 * We copy ethernet header into skb->data to have a common layout. 4137 */ 4138 static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 4139 { 4140 struct sk_buff *skb = napi->skb; 4141 const struct ethhdr *eth; 4142 unsigned int hlen = sizeof(*eth); 4143 4144 napi->skb = NULL; 4145 4146 skb_reset_mac_header(skb); 4147 skb_gro_reset_offset(skb); 4148 4149 eth = skb_gro_header_fast(skb, 0); 4150 if (unlikely(skb_gro_header_hard(skb, hlen))) { 4151 eth = skb_gro_header_slow(skb, hlen, 0); 4152 if (unlikely(!eth)) { 4153 napi_reuse_skb(napi, skb); 4154 return NULL; 4155 } 4156 } else { 4157 gro_pull_from_frag0(skb, hlen); 4158 NAPI_GRO_CB(skb)->frag0 += hlen; 4159 NAPI_GRO_CB(skb)->frag0_len -= hlen; 4160 } 4161 __skb_pull(skb, hlen); 4162 4163 /* 4164 * This works because the only protocols we care about don't require 4165 * special handling. 4166 * We'll fix it up properly in napi_frags_finish() 4167 */ 4168 skb->protocol = eth->h_proto; 4169 4170 return skb; 4171 } 4172 4173 gro_result_t napi_gro_frags(struct napi_struct *napi) 4174 { 4175 struct sk_buff *skb = napi_frags_skb(napi); 4176 4177 if (!skb) 4178 return GRO_DROP; 4179 4180 trace_napi_gro_frags_entry(skb); 4181 4182 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); 4183 } 4184 EXPORT_SYMBOL(napi_gro_frags); 4185 4186 /* 4187 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 4188 * Note: called with local irq disabled, but exits with local irq enabled. 4189 */ 4190 static void net_rps_action_and_irq_enable(struct softnet_data *sd) 4191 { 4192 #ifdef CONFIG_RPS 4193 struct softnet_data *remsd = sd->rps_ipi_list; 4194 4195 if (remsd) { 4196 sd->rps_ipi_list = NULL; 4197 4198 local_irq_enable(); 4199 4200 /* Send pending IPI's to kick RPS processing on remote cpus. */ 4201 while (remsd) { 4202 struct softnet_data *next = remsd->rps_ipi_next; 4203 4204 if (cpu_online(remsd->cpu)) 4205 smp_call_function_single_async(remsd->cpu, 4206 &remsd->csd); 4207 remsd = next; 4208 } 4209 } else 4210 #endif 4211 local_irq_enable(); 4212 } 4213 4214 static int process_backlog(struct napi_struct *napi, int quota) 4215 { 4216 int work = 0; 4217 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); 4218 4219 #ifdef CONFIG_RPS 4220 /* Check if we have pending ipi, its better to send them now, 4221 * not waiting net_rx_action() end. 
4222 */ 4223 if (sd->rps_ipi_list) { 4224 local_irq_disable(); 4225 net_rps_action_and_irq_enable(sd); 4226 } 4227 #endif 4228 napi->weight = weight_p; 4229 local_irq_disable(); 4230 while (work < quota) { 4231 struct sk_buff *skb; 4232 unsigned int qlen; 4233 4234 while ((skb = __skb_dequeue(&sd->process_queue))) { 4235 local_irq_enable(); 4236 __netif_receive_skb(skb); 4237 local_irq_disable(); 4238 input_queue_head_incr(sd); 4239 if (++work >= quota) { 4240 local_irq_enable(); 4241 return work; 4242 } 4243 } 4244 4245 rps_lock(sd); 4246 qlen = skb_queue_len(&sd->input_pkt_queue); 4247 if (qlen) 4248 skb_queue_splice_tail_init(&sd->input_pkt_queue, 4249 &sd->process_queue); 4250 4251 if (qlen < quota - work) { 4252 /* 4253 * Inline a custom version of __napi_complete(). 4254 * only current cpu owns and manipulates this napi, 4255 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 4256 * we can use a plain write instead of clear_bit(), 4257 * and we dont need an smp_mb() memory barrier. 4258 */ 4259 list_del(&napi->poll_list); 4260 napi->state = 0; 4261 4262 quota = work + qlen; 4263 } 4264 rps_unlock(sd); 4265 } 4266 local_irq_enable(); 4267 4268 return work; 4269 } 4270 4271 /** 4272 * __napi_schedule - schedule for receive 4273 * @n: entry to schedule 4274 * 4275 * The entry's receive function will be scheduled to run 4276 */ 4277 void __napi_schedule(struct napi_struct *n) 4278 { 4279 unsigned long flags; 4280 4281 local_irq_save(flags); 4282 ____napi_schedule(&__get_cpu_var(softnet_data), n); 4283 local_irq_restore(flags); 4284 } 4285 EXPORT_SYMBOL(__napi_schedule); 4286 4287 void __napi_complete(struct napi_struct *n) 4288 { 4289 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 4290 BUG_ON(n->gro_list); 4291 4292 list_del(&n->poll_list); 4293 smp_mb__before_atomic(); 4294 clear_bit(NAPI_STATE_SCHED, &n->state); 4295 } 4296 EXPORT_SYMBOL(__napi_complete); 4297 4298 void napi_complete(struct napi_struct *n) 4299 { 4300 unsigned long flags; 4301 4302 /* 4303 * don't let napi dequeue from the cpu poll list 4304 * just in case its running on a different cpu 4305 */ 4306 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 4307 return; 4308 4309 napi_gro_flush(n, false); 4310 local_irq_save(flags); 4311 __napi_complete(n); 4312 local_irq_restore(flags); 4313 } 4314 EXPORT_SYMBOL(napi_complete); 4315 4316 /* must be called under rcu_read_lock(), as we dont take a reference */ 4317 struct napi_struct *napi_by_id(unsigned int napi_id) 4318 { 4319 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 4320 struct napi_struct *napi; 4321 4322 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) 4323 if (napi->napi_id == napi_id) 4324 return napi; 4325 4326 return NULL; 4327 } 4328 EXPORT_SYMBOL_GPL(napi_by_id); 4329 4330 void napi_hash_add(struct napi_struct *napi) 4331 { 4332 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) { 4333 4334 spin_lock(&napi_hash_lock); 4335 4336 /* 0 is not a valid id, we also skip an id that is taken 4337 * we expect both events to be extremely rare 4338 */ 4339 napi->napi_id = 0; 4340 while (!napi->napi_id) { 4341 napi->napi_id = ++napi_gen_id; 4342 if (napi_by_id(napi->napi_id)) 4343 napi->napi_id = 0; 4344 } 4345 4346 hlist_add_head_rcu(&napi->napi_hash_node, 4347 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 4348 4349 spin_unlock(&napi_hash_lock); 4350 } 4351 } 4352 EXPORT_SYMBOL_GPL(napi_hash_add); 4353 4354 /* Warning : caller is responsible to make sure rcu grace period 4355 * is respected before freeing memory containing 
@napi 4356 */ 4357 void napi_hash_del(struct napi_struct *napi) 4358 { 4359 spin_lock(&napi_hash_lock); 4360 4361 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) 4362 hlist_del_rcu(&napi->napi_hash_node); 4363 4364 spin_unlock(&napi_hash_lock); 4365 } 4366 EXPORT_SYMBOL_GPL(napi_hash_del); 4367 4368 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 4369 int (*poll)(struct napi_struct *, int), int weight) 4370 { 4371 INIT_LIST_HEAD(&napi->poll_list); 4372 napi->gro_count = 0; 4373 napi->gro_list = NULL; 4374 napi->skb = NULL; 4375 napi->poll = poll; 4376 if (weight > NAPI_POLL_WEIGHT) 4377 pr_err_once("netif_napi_add() called with weight %d on device %s\n", 4378 weight, dev->name); 4379 napi->weight = weight; 4380 list_add(&napi->dev_list, &dev->napi_list); 4381 napi->dev = dev; 4382 #ifdef CONFIG_NETPOLL 4383 spin_lock_init(&napi->poll_lock); 4384 napi->poll_owner = -1; 4385 #endif 4386 set_bit(NAPI_STATE_SCHED, &napi->state); 4387 } 4388 EXPORT_SYMBOL(netif_napi_add); 4389 4390 void netif_napi_del(struct napi_struct *napi) 4391 { 4392 list_del_init(&napi->dev_list); 4393 napi_free_frags(napi); 4394 4395 kfree_skb_list(napi->gro_list); 4396 napi->gro_list = NULL; 4397 napi->gro_count = 0; 4398 } 4399 EXPORT_SYMBOL(netif_napi_del); 4400 4401 static void net_rx_action(struct softirq_action *h) 4402 { 4403 struct softnet_data *sd = &__get_cpu_var(softnet_data); 4404 unsigned long time_limit = jiffies + 2; 4405 int budget = netdev_budget; 4406 void *have; 4407 4408 local_irq_disable(); 4409 4410 while (!list_empty(&sd->poll_list)) { 4411 struct napi_struct *n; 4412 int work, weight; 4413 4414 /* If softirq window is exhausted then punt. 4415 * Allow this to run for 2 jiffies, which allows 4416 * an average latency of 1.5/HZ. 4417 */ 4418 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) 4419 goto softnet_break; 4420 4421 local_irq_enable(); 4422 4423 /* Even though interrupts have been re-enabled, this 4424 * access is safe because interrupts can only add new 4425 * entries to the tail of this list, and only ->poll() 4426 * calls can remove this head entry from the list. 4427 */ 4428 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); 4429 4430 have = netpoll_poll_lock(n); 4431 4432 weight = n->weight; 4433 4434 /* This NAPI_STATE_SCHED test is for avoiding a race 4435 * with netpoll's poll_napi(). Only the entity which 4436 * obtains the lock and sees NAPI_STATE_SCHED set will 4437 * actually make the ->poll() call. Therefore we avoid 4438 * accidentally calling ->poll() when NAPI is not scheduled. 4439 */ 4440 work = 0; 4441 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 4442 work = n->poll(n, weight); 4443 trace_napi_poll(n); 4444 } 4445 4446 WARN_ON_ONCE(work > weight); 4447 4448 budget -= work; 4449 4450 local_irq_disable(); 4451 4452 /* Drivers must not modify the NAPI state if they 4453 * consume the entire weight. In such cases this code 4454 * still "owns" the NAPI instance and therefore can 4455 * move the instance around on the list at-will. 4456 */ 4457 if (unlikely(work == weight)) { 4458 if (unlikely(napi_disable_pending(n))) { 4459 local_irq_enable(); 4460 napi_complete(n); 4461 local_irq_disable(); 4462 } else { 4463 if (n->gro_list) { 4464 /* flush too old packets 4465 * If HZ < 1000, flush all packets.
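 *
 * For illustration only, this is the ->poll() contract that net_rx_action()
 * relies on (my_clean_rx() and my_irq_enable() are made-up driver helpers,
 * not part of this file): report no more work than the given budget, and
 * call napi_complete() only once the ring is fully drained. Returning
 * exactly the full weight is what lands a driver in this branch.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = my_clean_rx(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			my_irq_enable(napi->dev);
 *		}
 *		return work_done;
 *	}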
4466 */ 4467 local_irq_enable(); 4468 napi_gro_flush(n, HZ >= 1000); 4469 local_irq_disable(); 4470 } 4471 list_move_tail(&n->poll_list, &sd->poll_list); 4472 } 4473 } 4474 4475 netpoll_poll_unlock(have); 4476 } 4477 out: 4478 net_rps_action_and_irq_enable(sd); 4479 4480 #ifdef CONFIG_NET_DMA 4481 /* 4482 * There may not be any more sk_buffs coming right now, so push 4483 * any pending DMA copies to hardware 4484 */ 4485 dma_issue_pending_all(); 4486 #endif 4487 4488 return; 4489 4490 softnet_break: 4491 sd->time_squeeze++; 4492 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4493 goto out; 4494 } 4495 4496 struct netdev_adjacent { 4497 struct net_device *dev; 4498 4499 /* upper master flag, there can only be one master device per list */ 4500 bool master; 4501 4502 /* counter for the number of times this device was added to us */ 4503 u16 ref_nr; 4504 4505 /* private field for the users */ 4506 void *private; 4507 4508 struct list_head list; 4509 struct rcu_head rcu; 4510 }; 4511 4512 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev, 4513 struct net_device *adj_dev, 4514 struct list_head *adj_list) 4515 { 4516 struct netdev_adjacent *adj; 4517 4518 list_for_each_entry(adj, adj_list, list) { 4519 if (adj->dev == adj_dev) 4520 return adj; 4521 } 4522 return NULL; 4523 } 4524 4525 /** 4526 * netdev_has_upper_dev - Check if device is linked to an upper device 4527 * @dev: device 4528 * @upper_dev: upper device to check 4529 * 4530 * Find out if a device is linked to specified upper device and return true 4531 * in case it is. Note that this checks only immediate upper device, 4532 * not through a complete stack of devices. The caller must hold the RTNL lock. 4533 */ 4534 bool netdev_has_upper_dev(struct net_device *dev, 4535 struct net_device *upper_dev) 4536 { 4537 ASSERT_RTNL(); 4538 4539 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper); 4540 } 4541 EXPORT_SYMBOL(netdev_has_upper_dev); 4542 4543 /** 4544 * netdev_has_any_upper_dev - Check if device is linked to some device 4545 * @dev: device 4546 * 4547 * Find out if a device is linked to an upper device and return true in case 4548 * it is. The caller must hold the RTNL lock. 4549 */ 4550 static bool netdev_has_any_upper_dev(struct net_device *dev) 4551 { 4552 ASSERT_RTNL(); 4553 4554 return !list_empty(&dev->all_adj_list.upper); 4555 } 4556 4557 /** 4558 * netdev_master_upper_dev_get - Get master upper device 4559 * @dev: device 4560 * 4561 * Find a master upper device and return pointer to it or NULL in case 4562 * it's not there. The caller must hold the RTNL lock. 
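 *
 * Hypothetical usage sketch (slave_dev and my_handle_master() are
 * placeholders, not part of this file):
 *
 *	rtnl_lock();
 *	master = netdev_master_upper_dev_get(slave_dev);
 *	if (master)
 *		my_handle_master(master);
 *	rtnl_unlock();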
4563 */ 4564 struct net_device *netdev_master_upper_dev_get(struct net_device *dev) 4565 { 4566 struct netdev_adjacent *upper; 4567 4568 ASSERT_RTNL(); 4569 4570 if (list_empty(&dev->adj_list.upper)) 4571 return NULL; 4572 4573 upper = list_first_entry(&dev->adj_list.upper, 4574 struct netdev_adjacent, list); 4575 if (likely(upper->master)) 4576 return upper->dev; 4577 return NULL; 4578 } 4579 EXPORT_SYMBOL(netdev_master_upper_dev_get); 4580 4581 void *netdev_adjacent_get_private(struct list_head *adj_list) 4582 { 4583 struct netdev_adjacent *adj; 4584 4585 adj = list_entry(adj_list, struct netdev_adjacent, list); 4586 4587 return adj->private; 4588 } 4589 EXPORT_SYMBOL(netdev_adjacent_get_private); 4590 4591 /** 4592 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list 4593 * @dev: device 4594 * @iter: list_head ** of the current position 4595 * 4596 * Gets the next device from the dev's upper list, starting from iter 4597 * position. The caller must hold RCU read lock. 4598 */ 4599 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 4600 struct list_head **iter) 4601 { 4602 struct netdev_adjacent *upper; 4603 4604 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 4605 4606 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4607 4608 if (&upper->list == &dev->adj_list.upper) 4609 return NULL; 4610 4611 *iter = &upper->list; 4612 4613 return upper->dev; 4614 } 4615 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); 4616 4617 /** 4618 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list 4619 * @dev: device 4620 * @iter: list_head ** of the current position 4621 * 4622 * Gets the next device from the dev's upper list, starting from iter 4623 * position. The caller must hold RCU read lock. 4624 */ 4625 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, 4626 struct list_head **iter) 4627 { 4628 struct netdev_adjacent *upper; 4629 4630 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); 4631 4632 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4633 4634 if (&upper->list == &dev->all_adj_list.upper) 4635 return NULL; 4636 4637 *iter = &upper->list; 4638 4639 return upper->dev; 4640 } 4641 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu); 4642 4643 /** 4644 * netdev_lower_get_next_private - Get the next ->private from the 4645 * lower neighbour list 4646 * @dev: device 4647 * @iter: list_head ** of the current position 4648 * 4649 * Gets the next netdev_adjacent->private from the dev's lower neighbour 4650 * list, starting from iter position. The caller must hold either hold the 4651 * RTNL lock or its own locking that guarantees that the neighbour lower 4652 * list will remain unchainged. 4653 */ 4654 void *netdev_lower_get_next_private(struct net_device *dev, 4655 struct list_head **iter) 4656 { 4657 struct netdev_adjacent *lower; 4658 4659 lower = list_entry(*iter, struct netdev_adjacent, list); 4660 4661 if (&lower->list == &dev->adj_list.lower) 4662 return NULL; 4663 4664 *iter = lower->list.next; 4665 4666 return lower->private; 4667 } 4668 EXPORT_SYMBOL(netdev_lower_get_next_private); 4669 4670 /** 4671 * netdev_lower_get_next_private_rcu - Get the next ->private from the 4672 * lower neighbour list, RCU 4673 * variant 4674 * @dev: device 4675 * @iter: list_head ** of the current position 4676 * 4677 * Gets the next netdev_adjacent->private from the dev's lower neighbour 4678 * list, starting from iter position. The caller must hold RCU read lock. 
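 *
 * A minimal usage sketch, assuming every lower link was added with a
 * non-NULL private pointer (bond_dev and use_private() are placeholders):
 *
 *	struct list_head *iter = &bond_dev->adj_list.lower;
 *	void *priv;
 *
 *	rcu_read_lock();
 *	while ((priv = netdev_lower_get_next_private_rcu(bond_dev, &iter)))
 *		use_private(priv);
 *	rcu_read_unlock();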
4679 */ 4680 void *netdev_lower_get_next_private_rcu(struct net_device *dev, 4681 struct list_head **iter) 4682 { 4683 struct netdev_adjacent *lower; 4684 4685 WARN_ON_ONCE(!rcu_read_lock_held()); 4686 4687 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); 4688 4689 if (&lower->list == &dev->adj_list.lower) 4690 return NULL; 4691 4692 *iter = &lower->list; 4693 4694 return lower->private; 4695 } 4696 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 4697 4698 /** 4699 * netdev_lower_get_next - Get the next device from the lower neighbour 4700 * list 4701 * @dev: device 4702 * @iter: list_head ** of the current position 4703 * 4704 * Gets the next netdev_adjacent from the dev's lower neighbour 4705 * list, starting from iter position. The caller must hold RTNL lock or 4706 * its own locking that guarantees that the neighbour lower 4707 * list will remain unchainged. 4708 */ 4709 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) 4710 { 4711 struct netdev_adjacent *lower; 4712 4713 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 4714 4715 if (&lower->list == &dev->adj_list.lower) 4716 return NULL; 4717 4718 *iter = &lower->list; 4719 4720 return lower->dev; 4721 } 4722 EXPORT_SYMBOL(netdev_lower_get_next); 4723 4724 /** 4725 * netdev_lower_get_first_private_rcu - Get the first ->private from the 4726 * lower neighbour list, RCU 4727 * variant 4728 * @dev: device 4729 * 4730 * Gets the first netdev_adjacent->private from the dev's lower neighbour 4731 * list. The caller must hold RCU read lock. 4732 */ 4733 void *netdev_lower_get_first_private_rcu(struct net_device *dev) 4734 { 4735 struct netdev_adjacent *lower; 4736 4737 lower = list_first_or_null_rcu(&dev->adj_list.lower, 4738 struct netdev_adjacent, list); 4739 if (lower) 4740 return lower->private; 4741 return NULL; 4742 } 4743 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu); 4744 4745 /** 4746 * netdev_master_upper_dev_get_rcu - Get master upper device 4747 * @dev: device 4748 * 4749 * Find a master upper device and return pointer to it or NULL in case 4750 * it's not there. The caller must hold the RCU read lock. 4751 */ 4752 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) 4753 { 4754 struct netdev_adjacent *upper; 4755 4756 upper = list_first_or_null_rcu(&dev->adj_list.upper, 4757 struct netdev_adjacent, list); 4758 if (upper && likely(upper->master)) 4759 return upper->dev; 4760 return NULL; 4761 } 4762 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 4763 4764 static int netdev_adjacent_sysfs_add(struct net_device *dev, 4765 struct net_device *adj_dev, 4766 struct list_head *dev_list) 4767 { 4768 char linkname[IFNAMSIZ+7]; 4769 sprintf(linkname, dev_list == &dev->adj_list.upper ? 4770 "upper_%s" : "lower_%s", adj_dev->name); 4771 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 4772 linkname); 4773 } 4774 static void netdev_adjacent_sysfs_del(struct net_device *dev, 4775 char *name, 4776 struct list_head *dev_list) 4777 { 4778 char linkname[IFNAMSIZ+7]; 4779 sprintf(linkname, dev_list == &dev->adj_list.upper ? 
4780 "upper_%s" : "lower_%s", name); 4781 sysfs_remove_link(&(dev->dev.kobj), linkname); 4782 } 4783 4784 #define netdev_adjacent_is_neigh_list(dev, dev_list) \ 4785 (dev_list == &dev->adj_list.upper || \ 4786 dev_list == &dev->adj_list.lower) 4787 4788 static int __netdev_adjacent_dev_insert(struct net_device *dev, 4789 struct net_device *adj_dev, 4790 struct list_head *dev_list, 4791 void *private, bool master) 4792 { 4793 struct netdev_adjacent *adj; 4794 int ret; 4795 4796 adj = __netdev_find_adj(dev, adj_dev, dev_list); 4797 4798 if (adj) { 4799 adj->ref_nr++; 4800 return 0; 4801 } 4802 4803 adj = kmalloc(sizeof(*adj), GFP_KERNEL); 4804 if (!adj) 4805 return -ENOMEM; 4806 4807 adj->dev = adj_dev; 4808 adj->master = master; 4809 adj->ref_nr = 1; 4810 adj->private = private; 4811 dev_hold(adj_dev); 4812 4813 pr_debug("dev_hold for %s, because of link added from %s to %s\n", 4814 adj_dev->name, dev->name, adj_dev->name); 4815 4816 if (netdev_adjacent_is_neigh_list(dev, dev_list)) { 4817 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); 4818 if (ret) 4819 goto free_adj; 4820 } 4821 4822 /* Ensure that master link is always the first item in list. */ 4823 if (master) { 4824 ret = sysfs_create_link(&(dev->dev.kobj), 4825 &(adj_dev->dev.kobj), "master"); 4826 if (ret) 4827 goto remove_symlinks; 4828 4829 list_add_rcu(&adj->list, dev_list); 4830 } else { 4831 list_add_tail_rcu(&adj->list, dev_list); 4832 } 4833 4834 return 0; 4835 4836 remove_symlinks: 4837 if (netdev_adjacent_is_neigh_list(dev, dev_list)) 4838 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 4839 free_adj: 4840 kfree(adj); 4841 dev_put(adj_dev); 4842 4843 return ret; 4844 } 4845 4846 static void __netdev_adjacent_dev_remove(struct net_device *dev, 4847 struct net_device *adj_dev, 4848 struct list_head *dev_list) 4849 { 4850 struct netdev_adjacent *adj; 4851 4852 adj = __netdev_find_adj(dev, adj_dev, dev_list); 4853 4854 if (!adj) { 4855 pr_err("tried to remove device %s from %s\n", 4856 dev->name, adj_dev->name); 4857 BUG(); 4858 } 4859 4860 if (adj->ref_nr > 1) { 4861 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name, 4862 adj->ref_nr-1); 4863 adj->ref_nr--; 4864 return; 4865 } 4866 4867 if (adj->master) 4868 sysfs_remove_link(&(dev->dev.kobj), "master"); 4869 4870 if (netdev_adjacent_is_neigh_list(dev, dev_list)) 4871 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); 4872 4873 list_del_rcu(&adj->list); 4874 pr_debug("dev_put for %s, because link removed from %s to %s\n", 4875 adj_dev->name, dev->name, adj_dev->name); 4876 dev_put(adj_dev); 4877 kfree_rcu(adj, rcu); 4878 } 4879 4880 static int __netdev_adjacent_dev_link_lists(struct net_device *dev, 4881 struct net_device *upper_dev, 4882 struct list_head *up_list, 4883 struct list_head *down_list, 4884 void *private, bool master) 4885 { 4886 int ret; 4887 4888 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private, 4889 master); 4890 if (ret) 4891 return ret; 4892 4893 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private, 4894 false); 4895 if (ret) { 4896 __netdev_adjacent_dev_remove(dev, upper_dev, up_list); 4897 return ret; 4898 } 4899 4900 return 0; 4901 } 4902 4903 static int __netdev_adjacent_dev_link(struct net_device *dev, 4904 struct net_device *upper_dev) 4905 { 4906 return __netdev_adjacent_dev_link_lists(dev, upper_dev, 4907 &dev->all_adj_list.upper, 4908 &upper_dev->all_adj_list.lower, 4909 NULL, false); 4910 } 4911 4912 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev, 4913 
struct net_device *upper_dev, 4914 struct list_head *up_list, 4915 struct list_head *down_list) 4916 { 4917 __netdev_adjacent_dev_remove(dev, upper_dev, up_list); 4918 __netdev_adjacent_dev_remove(upper_dev, dev, down_list); 4919 } 4920 4921 static void __netdev_adjacent_dev_unlink(struct net_device *dev, 4922 struct net_device *upper_dev) 4923 { 4924 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 4925 &dev->all_adj_list.upper, 4926 &upper_dev->all_adj_list.lower); 4927 } 4928 4929 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev, 4930 struct net_device *upper_dev, 4931 void *private, bool master) 4932 { 4933 int ret = __netdev_adjacent_dev_link(dev, upper_dev); 4934 4935 if (ret) 4936 return ret; 4937 4938 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 4939 &dev->adj_list.upper, 4940 &upper_dev->adj_list.lower, 4941 private, master); 4942 if (ret) { 4943 __netdev_adjacent_dev_unlink(dev, upper_dev); 4944 return ret; 4945 } 4946 4947 return 0; 4948 } 4949 4950 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, 4951 struct net_device *upper_dev) 4952 { 4953 __netdev_adjacent_dev_unlink(dev, upper_dev); 4954 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 4955 &dev->adj_list.upper, 4956 &upper_dev->adj_list.lower); 4957 } 4958 4959 static int __netdev_upper_dev_link(struct net_device *dev, 4960 struct net_device *upper_dev, bool master, 4961 void *private) 4962 { 4963 struct netdev_adjacent *i, *j, *to_i, *to_j; 4964 int ret = 0; 4965 4966 ASSERT_RTNL(); 4967 4968 if (dev == upper_dev) 4969 return -EBUSY; 4970 4971 /* To prevent loops, check if dev is not upper device to upper_dev. */ 4972 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper)) 4973 return -EBUSY; 4974 4975 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper)) 4976 return -EEXIST; 4977 4978 if (master && netdev_master_upper_dev_get(dev)) 4979 return -EBUSY; 4980 4981 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private, 4982 master); 4983 if (ret) 4984 return ret; 4985 4986 /* Now that we linked these devs, make all the upper_dev's 4987 * all_adj_list.upper visible to every dev's all_adj_list.lower and 4988 * vice versa, and don't forget the devices themselves. All of these 4989 * links are non-neighbours.
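 *
 * Illustrative example (device names are hypothetical): if
 * __netdev_upper_dev_link(eth0, bond0, ...) runs while vlan0 already sits
 * on top of bond0 and lowerX already sits below eth0, the loops below add
 * the non-neighbour all_adj_list entries
 *
 *	lowerX <-> vlan0, eth0 <-> vlan0 and lowerX <-> bond0,
 *
 * so every device can see its whole stack, not just its direct neighbours.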
4990 */ 4991 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 4992 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { 4993 pr_debug("Interlinking %s with %s, non-neighbour\n", 4994 i->dev->name, j->dev->name); 4995 ret = __netdev_adjacent_dev_link(i->dev, j->dev); 4996 if (ret) 4997 goto rollback_mesh; 4998 } 4999 } 5000 5001 /* add dev to every upper_dev's upper device */ 5002 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { 5003 pr_debug("linking %s's upper device %s with %s\n", 5004 upper_dev->name, i->dev->name, dev->name); 5005 ret = __netdev_adjacent_dev_link(dev, i->dev); 5006 if (ret) 5007 goto rollback_upper_mesh; 5008 } 5009 5010 /* add upper_dev to every dev's lower device */ 5011 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 5012 pr_debug("linking %s's lower device %s with %s\n", dev->name, 5013 i->dev->name, upper_dev->name); 5014 ret = __netdev_adjacent_dev_link(i->dev, upper_dev); 5015 if (ret) 5016 goto rollback_lower_mesh; 5017 } 5018 5019 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); 5020 return 0; 5021 5022 rollback_lower_mesh: 5023 to_i = i; 5024 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 5025 if (i == to_i) 5026 break; 5027 __netdev_adjacent_dev_unlink(i->dev, upper_dev); 5028 } 5029 5030 i = NULL; 5031 5032 rollback_upper_mesh: 5033 to_i = i; 5034 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) { 5035 if (i == to_i) 5036 break; 5037 __netdev_adjacent_dev_unlink(dev, i->dev); 5038 } 5039 5040 i = j = NULL; 5041 5042 rollback_mesh: 5043 to_i = i; 5044 to_j = j; 5045 list_for_each_entry(i, &dev->all_adj_list.lower, list) { 5046 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) { 5047 if (i == to_i && j == to_j) 5048 break; 5049 __netdev_adjacent_dev_unlink(i->dev, j->dev); 5050 } 5051 if (i == to_i) 5052 break; 5053 } 5054 5055 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 5056 5057 return ret; 5058 } 5059 5060 /** 5061 * netdev_upper_dev_link - Add a link to the upper device 5062 * @dev: device 5063 * @upper_dev: new upper device 5064 * 5065 * Adds a link to device which is upper to this one. The caller must hold 5066 * the RTNL lock. On a failure a negative errno code is returned. 5067 * On success the reference counts are adjusted and the function 5068 * returns zero. 5069 */ 5070 int netdev_upper_dev_link(struct net_device *dev, 5071 struct net_device *upper_dev) 5072 { 5073 return __netdev_upper_dev_link(dev, upper_dev, false, NULL); 5074 } 5075 EXPORT_SYMBOL(netdev_upper_dev_link); 5076 5077 /** 5078 * netdev_master_upper_dev_link - Add a master link to the upper device 5079 * @dev: device 5080 * @upper_dev: new upper device 5081 * 5082 * Adds a link to device which is upper to this one. In this case, only 5083 * one master upper device can be linked, although other non-master devices 5084 * might be linked as well. The caller must hold the RTNL lock. 5085 * On a failure a negative errno code is returned. On success the reference 5086 * counts are adjusted and the function returns zero. 
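 *
 * Hedged sketch of the usual pairing (bond_dev and slave_dev are
 * placeholders; error handling trimmed), always under RTNL:
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);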
5087 */ 5088 int netdev_master_upper_dev_link(struct net_device *dev, 5089 struct net_device *upper_dev) 5090 { 5091 return __netdev_upper_dev_link(dev, upper_dev, true, NULL); 5092 } 5093 EXPORT_SYMBOL(netdev_master_upper_dev_link); 5094 5095 int netdev_master_upper_dev_link_private(struct net_device *dev, 5096 struct net_device *upper_dev, 5097 void *private) 5098 { 5099 return __netdev_upper_dev_link(dev, upper_dev, true, private); 5100 } 5101 EXPORT_SYMBOL(netdev_master_upper_dev_link_private); 5102 5103 /** 5104 * netdev_upper_dev_unlink - Removes a link to upper device 5105 * @dev: device 5106 * @upper_dev: new upper device 5107 * 5108 * Removes a link to device which is upper to this one. The caller must hold 5109 * the RTNL lock. 5110 */ 5111 void netdev_upper_dev_unlink(struct net_device *dev, 5112 struct net_device *upper_dev) 5113 { 5114 struct netdev_adjacent *i, *j; 5115 ASSERT_RTNL(); 5116 5117 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); 5118 5119 /* Here is the tricky part. We must remove all dev's lower 5120 * devices from all upper_dev's upper devices and vice 5121 * versa, to maintain the graph relationship. 5122 */ 5123 list_for_each_entry(i, &dev->all_adj_list.lower, list) 5124 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) 5125 __netdev_adjacent_dev_unlink(i->dev, j->dev); 5126 5127 /* remove also the devices itself from lower/upper device 5128 * list 5129 */ 5130 list_for_each_entry(i, &dev->all_adj_list.lower, list) 5131 __netdev_adjacent_dev_unlink(i->dev, upper_dev); 5132 5133 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) 5134 __netdev_adjacent_dev_unlink(dev, i->dev); 5135 5136 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); 5137 } 5138 EXPORT_SYMBOL(netdev_upper_dev_unlink); 5139 5140 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) 5141 { 5142 struct netdev_adjacent *iter; 5143 5144 list_for_each_entry(iter, &dev->adj_list.upper, list) { 5145 netdev_adjacent_sysfs_del(iter->dev, oldname, 5146 &iter->dev->adj_list.lower); 5147 netdev_adjacent_sysfs_add(iter->dev, dev, 5148 &iter->dev->adj_list.lower); 5149 } 5150 5151 list_for_each_entry(iter, &dev->adj_list.lower, list) { 5152 netdev_adjacent_sysfs_del(iter->dev, oldname, 5153 &iter->dev->adj_list.upper); 5154 netdev_adjacent_sysfs_add(iter->dev, dev, 5155 &iter->dev->adj_list.upper); 5156 } 5157 } 5158 5159 void *netdev_lower_dev_get_private(struct net_device *dev, 5160 struct net_device *lower_dev) 5161 { 5162 struct netdev_adjacent *lower; 5163 5164 if (!lower_dev) 5165 return NULL; 5166 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower); 5167 if (!lower) 5168 return NULL; 5169 5170 return lower->private; 5171 } 5172 EXPORT_SYMBOL(netdev_lower_dev_get_private); 5173 5174 5175 int dev_get_nest_level(struct net_device *dev, 5176 bool (*type_check)(struct net_device *dev)) 5177 { 5178 struct net_device *lower = NULL; 5179 struct list_head *iter; 5180 int max_nest = -1; 5181 int nest; 5182 5183 ASSERT_RTNL(); 5184 5185 netdev_for_each_lower_dev(dev, lower, iter) { 5186 nest = dev_get_nest_level(lower, type_check); 5187 if (max_nest < nest) 5188 max_nest = nest; 5189 } 5190 5191 if (type_check(dev)) 5192 max_nest++; 5193 5194 return max_nest; 5195 } 5196 EXPORT_SYMBOL(dev_get_nest_level); 5197 5198 static void dev_change_rx_flags(struct net_device *dev, int flags) 5199 { 5200 const struct net_device_ops *ops = dev->netdev_ops; 5201 5202 if (ops->ndo_change_rx_flags) 5203 ops->ndo_change_rx_flags(dev, flags); 5204 } 5205 5206 static 
int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) 5207 { 5208 unsigned int old_flags = dev->flags; 5209 kuid_t uid; 5210 kgid_t gid; 5211 5212 ASSERT_RTNL(); 5213 5214 dev->flags |= IFF_PROMISC; 5215 dev->promiscuity += inc; 5216 if (dev->promiscuity == 0) { 5217 /* 5218 * Avoid overflow. 5219 * If inc causes overflow, untouch promisc and return error. 5220 */ 5221 if (inc < 0) 5222 dev->flags &= ~IFF_PROMISC; 5223 else { 5224 dev->promiscuity -= inc; 5225 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", 5226 dev->name); 5227 return -EOVERFLOW; 5228 } 5229 } 5230 if (dev->flags != old_flags) { 5231 pr_info("device %s %s promiscuous mode\n", 5232 dev->name, 5233 dev->flags & IFF_PROMISC ? "entered" : "left"); 5234 if (audit_enabled) { 5235 current_uid_gid(&uid, &gid); 5236 audit_log(current->audit_context, GFP_ATOMIC, 5237 AUDIT_ANOM_PROMISCUOUS, 5238 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 5239 dev->name, (dev->flags & IFF_PROMISC), 5240 (old_flags & IFF_PROMISC), 5241 from_kuid(&init_user_ns, audit_get_loginuid(current)), 5242 from_kuid(&init_user_ns, uid), 5243 from_kgid(&init_user_ns, gid), 5244 audit_get_sessionid(current)); 5245 } 5246 5247 dev_change_rx_flags(dev, IFF_PROMISC); 5248 } 5249 if (notify) 5250 __dev_notify_flags(dev, old_flags, IFF_PROMISC); 5251 return 0; 5252 } 5253 5254 /** 5255 * dev_set_promiscuity - update promiscuity count on a device 5256 * @dev: device 5257 * @inc: modifier 5258 * 5259 * Add or remove promiscuity from a device. While the count in the device 5260 * remains above zero the interface remains promiscuous. Once it hits zero 5261 * the device reverts back to normal filtering operation. A negative inc 5262 * value is used to drop promiscuity on the device. 5263 * Return 0 if successful or a negative errno code on error. 5264 */ 5265 int dev_set_promiscuity(struct net_device *dev, int inc) 5266 { 5267 unsigned int old_flags = dev->flags; 5268 int err; 5269 5270 err = __dev_set_promiscuity(dev, inc, true); 5271 if (err < 0) 5272 return err; 5273 if (dev->flags != old_flags) 5274 dev_set_rx_mode(dev); 5275 return err; 5276 } 5277 EXPORT_SYMBOL(dev_set_promiscuity); 5278 5279 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) 5280 { 5281 unsigned int old_flags = dev->flags, old_gflags = dev->gflags; 5282 5283 ASSERT_RTNL(); 5284 5285 dev->flags |= IFF_ALLMULTI; 5286 dev->allmulti += inc; 5287 if (dev->allmulti == 0) { 5288 /* 5289 * Avoid overflow. 5290 * If inc causes overflow, untouch allmulti and return error. 5291 */ 5292 if (inc < 0) 5293 dev->flags &= ~IFF_ALLMULTI; 5294 else { 5295 dev->allmulti -= inc; 5296 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", 5297 dev->name); 5298 return -EOVERFLOW; 5299 } 5300 } 5301 if (dev->flags ^ old_flags) { 5302 dev_change_rx_flags(dev, IFF_ALLMULTI); 5303 dev_set_rx_mode(dev); 5304 if (notify) 5305 __dev_notify_flags(dev, old_flags, 5306 dev->gflags ^ old_gflags); 5307 } 5308 return 0; 5309 } 5310 5311 /** 5312 * dev_set_allmulti - update allmulti count on a device 5313 * @dev: device 5314 * @inc: modifier 5315 * 5316 * Add or remove reception of all multicast frames to a device. While the 5317 * count in the device remains above zero the interface remains listening 5318 * to all interfaces. Once it hits zero the device reverts back to normal 5319 * filtering operation. 
A negative @inc value is used to drop the counter 5320 * when releasing a resource needing all multicasts. 5321 * Return 0 if successful or a negative errno code on error. 5322 */ 5323 5324 int dev_set_allmulti(struct net_device *dev, int inc) 5325 { 5326 return __dev_set_allmulti(dev, inc, true); 5327 } 5328 EXPORT_SYMBOL(dev_set_allmulti); 5329 5330 /* 5331 * Upload unicast and multicast address lists to device and 5332 * configure RX filtering. When the device doesn't support unicast 5333 * filtering it is put in promiscuous mode while unicast addresses 5334 * are present. 5335 */ 5336 void __dev_set_rx_mode(struct net_device *dev) 5337 { 5338 const struct net_device_ops *ops = dev->netdev_ops; 5339 5340 /* dev_open will call this function so the list will stay sane. */ 5341 if (!(dev->flags&IFF_UP)) 5342 return; 5343 5344 if (!netif_device_present(dev)) 5345 return; 5346 5347 if (!(dev->priv_flags & IFF_UNICAST_FLT)) { 5348 /* Unicast addresses changes may only happen under the rtnl, 5349 * therefore calling __dev_set_promiscuity here is safe. 5350 */ 5351 if (!netdev_uc_empty(dev) && !dev->uc_promisc) { 5352 __dev_set_promiscuity(dev, 1, false); 5353 dev->uc_promisc = true; 5354 } else if (netdev_uc_empty(dev) && dev->uc_promisc) { 5355 __dev_set_promiscuity(dev, -1, false); 5356 dev->uc_promisc = false; 5357 } 5358 } 5359 5360 if (ops->ndo_set_rx_mode) 5361 ops->ndo_set_rx_mode(dev); 5362 } 5363 5364 void dev_set_rx_mode(struct net_device *dev) 5365 { 5366 netif_addr_lock_bh(dev); 5367 __dev_set_rx_mode(dev); 5368 netif_addr_unlock_bh(dev); 5369 } 5370 5371 /** 5372 * dev_get_flags - get flags reported to userspace 5373 * @dev: device 5374 * 5375 * Get the combination of flag bits exported through APIs to userspace. 5376 */ 5377 unsigned int dev_get_flags(const struct net_device *dev) 5378 { 5379 unsigned int flags; 5380 5381 flags = (dev->flags & ~(IFF_PROMISC | 5382 IFF_ALLMULTI | 5383 IFF_RUNNING | 5384 IFF_LOWER_UP | 5385 IFF_DORMANT)) | 5386 (dev->gflags & (IFF_PROMISC | 5387 IFF_ALLMULTI)); 5388 5389 if (netif_running(dev)) { 5390 if (netif_oper_up(dev)) 5391 flags |= IFF_RUNNING; 5392 if (netif_carrier_ok(dev)) 5393 flags |= IFF_LOWER_UP; 5394 if (netif_dormant(dev)) 5395 flags |= IFF_DORMANT; 5396 } 5397 5398 return flags; 5399 } 5400 EXPORT_SYMBOL(dev_get_flags); 5401 5402 int __dev_change_flags(struct net_device *dev, unsigned int flags) 5403 { 5404 unsigned int old_flags = dev->flags; 5405 int ret; 5406 5407 ASSERT_RTNL(); 5408 5409 /* 5410 * Set the flags on our device. 5411 */ 5412 5413 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | 5414 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | 5415 IFF_AUTOMEDIA)) | 5416 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | 5417 IFF_ALLMULTI)); 5418 5419 /* 5420 * Load in the correct multicast list now the flags have changed. 5421 */ 5422 5423 if ((old_flags ^ flags) & IFF_MULTICAST) 5424 dev_change_rx_flags(dev, IFF_MULTICAST); 5425 5426 dev_set_rx_mode(dev); 5427 5428 /* 5429 * Have we downed the interface. We handle IFF_UP ourselves 5430 * according to user attempts to set it, rather than blindly 5431 * setting it. 5432 */ 5433 5434 ret = 0; 5435 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ 5436 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); 5437 5438 if (!ret) 5439 dev_set_rx_mode(dev); 5440 } 5441 5442 if ((flags ^ dev->gflags) & IFF_PROMISC) { 5443 int inc = (flags & IFF_PROMISC) ? 
1 : -1; 5444 unsigned int old_flags = dev->flags; 5445 5446 dev->gflags ^= IFF_PROMISC; 5447 5448 if (__dev_set_promiscuity(dev, inc, false) >= 0) 5449 if (dev->flags != old_flags) 5450 dev_set_rx_mode(dev); 5451 } 5452 5453 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI 5454 is important. Some (broken) drivers set IFF_PROMISC, when 5455 IFF_ALLMULTI is requested not asking us and not reporting. 5456 */ 5457 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 5458 int inc = (flags & IFF_ALLMULTI) ? 1 : -1; 5459 5460 dev->gflags ^= IFF_ALLMULTI; 5461 __dev_set_allmulti(dev, inc, false); 5462 } 5463 5464 return ret; 5465 } 5466 5467 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, 5468 unsigned int gchanges) 5469 { 5470 unsigned int changes = dev->flags ^ old_flags; 5471 5472 if (gchanges) 5473 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC); 5474 5475 if (changes & IFF_UP) { 5476 if (dev->flags & IFF_UP) 5477 call_netdevice_notifiers(NETDEV_UP, dev); 5478 else 5479 call_netdevice_notifiers(NETDEV_DOWN, dev); 5480 } 5481 5482 if (dev->flags & IFF_UP && 5483 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { 5484 struct netdev_notifier_change_info change_info; 5485 5486 change_info.flags_changed = changes; 5487 call_netdevice_notifiers_info(NETDEV_CHANGE, dev, 5488 &change_info.info); 5489 } 5490 } 5491 5492 /** 5493 * dev_change_flags - change device settings 5494 * @dev: device 5495 * @flags: device state flags 5496 * 5497 * Change settings on device based state flags. The flags are 5498 * in the userspace exported format. 5499 */ 5500 int dev_change_flags(struct net_device *dev, unsigned int flags) 5501 { 5502 int ret; 5503 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; 5504 5505 ret = __dev_change_flags(dev, flags); 5506 if (ret < 0) 5507 return ret; 5508 5509 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags); 5510 __dev_notify_flags(dev, old_flags, changes); 5511 return ret; 5512 } 5513 EXPORT_SYMBOL(dev_change_flags); 5514 5515 static int __dev_set_mtu(struct net_device *dev, int new_mtu) 5516 { 5517 const struct net_device_ops *ops = dev->netdev_ops; 5518 5519 if (ops->ndo_change_mtu) 5520 return ops->ndo_change_mtu(dev, new_mtu); 5521 5522 dev->mtu = new_mtu; 5523 return 0; 5524 } 5525 5526 /** 5527 * dev_set_mtu - Change maximum transfer unit 5528 * @dev: device 5529 * @new_mtu: new transfer unit 5530 * 5531 * Change the maximum transfer size of the network device. 5532 */ 5533 int dev_set_mtu(struct net_device *dev, int new_mtu) 5534 { 5535 int err, orig_mtu; 5536 5537 if (new_mtu == dev->mtu) 5538 return 0; 5539 5540 /* MTU must be positive. */ 5541 if (new_mtu < 0) 5542 return -EINVAL; 5543 5544 if (!netif_device_present(dev)) 5545 return -ENODEV; 5546 5547 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev); 5548 err = notifier_to_errno(err); 5549 if (err) 5550 return err; 5551 5552 orig_mtu = dev->mtu; 5553 err = __dev_set_mtu(dev, new_mtu); 5554 5555 if (!err) { 5556 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 5557 err = notifier_to_errno(err); 5558 if (err) { 5559 /* setting mtu back and notifying everyone again, 5560 * so that they have a chance to revert changes. 
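 *
 * From a caller's point of view (an illustrative sketch, typically run
 * under RTNL), a NETDEV_PRECHANGEMTU veto or a failed NETDEV_CHANGEMTU
 * simply surfaces as the errno of the call, with dev->mtu left at its
 * previous value:
 *
 *	err = dev_set_mtu(dev, 9000);
 *	if (err)
 *		netdev_err(dev, "cannot enable jumbo frames: %d\n", err);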
5561 */ 5562 __dev_set_mtu(dev, orig_mtu); 5563 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 5564 } 5565 } 5566 return err; 5567 } 5568 EXPORT_SYMBOL(dev_set_mtu); 5569 5570 /** 5571 * dev_set_group - Change group this device belongs to 5572 * @dev: device 5573 * @new_group: group this device should belong to 5574 */ 5575 void dev_set_group(struct net_device *dev, int new_group) 5576 { 5577 dev->group = new_group; 5578 } 5579 EXPORT_SYMBOL(dev_set_group); 5580 5581 /** 5582 * dev_set_mac_address - Change Media Access Control Address 5583 * @dev: device 5584 * @sa: new address 5585 * 5586 * Change the hardware (MAC) address of the device 5587 */ 5588 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 5589 { 5590 const struct net_device_ops *ops = dev->netdev_ops; 5591 int err; 5592 5593 if (!ops->ndo_set_mac_address) 5594 return -EOPNOTSUPP; 5595 if (sa->sa_family != dev->type) 5596 return -EINVAL; 5597 if (!netif_device_present(dev)) 5598 return -ENODEV; 5599 err = ops->ndo_set_mac_address(dev, sa); 5600 if (err) 5601 return err; 5602 dev->addr_assign_type = NET_ADDR_SET; 5603 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 5604 add_device_randomness(dev->dev_addr, dev->addr_len); 5605 return 0; 5606 } 5607 EXPORT_SYMBOL(dev_set_mac_address); 5608 5609 /** 5610 * dev_change_carrier - Change device carrier 5611 * @dev: device 5612 * @new_carrier: new value 5613 * 5614 * Change device carrier 5615 */ 5616 int dev_change_carrier(struct net_device *dev, bool new_carrier) 5617 { 5618 const struct net_device_ops *ops = dev->netdev_ops; 5619 5620 if (!ops->ndo_change_carrier) 5621 return -EOPNOTSUPP; 5622 if (!netif_device_present(dev)) 5623 return -ENODEV; 5624 return ops->ndo_change_carrier(dev, new_carrier); 5625 } 5626 EXPORT_SYMBOL(dev_change_carrier); 5627 5628 /** 5629 * dev_get_phys_port_id - Get device physical port ID 5630 * @dev: device 5631 * @ppid: port ID 5632 * 5633 * Get device physical port ID 5634 */ 5635 int dev_get_phys_port_id(struct net_device *dev, 5636 struct netdev_phys_port_id *ppid) 5637 { 5638 const struct net_device_ops *ops = dev->netdev_ops; 5639 5640 if (!ops->ndo_get_phys_port_id) 5641 return -EOPNOTSUPP; 5642 return ops->ndo_get_phys_port_id(dev, ppid); 5643 } 5644 EXPORT_SYMBOL(dev_get_phys_port_id); 5645 5646 /** 5647 * dev_new_index - allocate an ifindex 5648 * @net: the applicable net namespace 5649 * 5650 * Returns a suitable unique value for a new device interface 5651 * number. The caller must hold the rtnl semaphore or the 5652 * dev_base_lock to be sure it remains unique. 5653 */ 5654 static int dev_new_index(struct net *net) 5655 { 5656 int ifindex = net->ifindex; 5657 for (;;) { 5658 if (++ifindex <= 0) 5659 ifindex = 1; 5660 if (!__dev_get_by_index(net, ifindex)) 5661 return net->ifindex = ifindex; 5662 } 5663 } 5664 5665 /* Delayed registration/unregisteration */ 5666 static LIST_HEAD(net_todo_list); 5667 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 5668 5669 static void net_set_todo(struct net_device *dev) 5670 { 5671 list_add_tail(&dev->todo_list, &net_todo_list); 5672 dev_net(dev)->dev_unreg_count++; 5673 } 5674 5675 static void rollback_registered_many(struct list_head *head) 5676 { 5677 struct net_device *dev, *tmp; 5678 LIST_HEAD(close_head); 5679 5680 BUG_ON(dev_boot_phase); 5681 ASSERT_RTNL(); 5682 5683 list_for_each_entry_safe(dev, tmp, head, unreg_list) { 5684 /* Some devices call without registering 5685 * for initialization unwind. Remove those 5686 * devices and proceed with the remaining. 
5687 */ 5688 if (dev->reg_state == NETREG_UNINITIALIZED) { 5689 pr_debug("unregister_netdevice: device %s/%p never was registered\n", 5690 dev->name, dev); 5691 5692 WARN_ON(1); 5693 list_del(&dev->unreg_list); 5694 continue; 5695 } 5696 dev->dismantle = true; 5697 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5698 } 5699 5700 /* If device is running, close it first. */ 5701 list_for_each_entry(dev, head, unreg_list) 5702 list_add_tail(&dev->close_list, &close_head); 5703 dev_close_many(&close_head); 5704 5705 list_for_each_entry(dev, head, unreg_list) { 5706 /* And unlink it from device chain. */ 5707 unlist_netdevice(dev); 5708 5709 dev->reg_state = NETREG_UNREGISTERING; 5710 } 5711 5712 synchronize_net(); 5713 5714 list_for_each_entry(dev, head, unreg_list) { 5715 /* Shutdown queueing discipline. */ 5716 dev_shutdown(dev); 5717 5718 5719 /* Notify protocols, that we are about to destroy 5720 this device. They should clean all the things. 5721 */ 5722 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5723 5724 /* 5725 * Flush the unicast and multicast chains 5726 */ 5727 dev_uc_flush(dev); 5728 dev_mc_flush(dev); 5729 5730 if (dev->netdev_ops->ndo_uninit) 5731 dev->netdev_ops->ndo_uninit(dev); 5732 5733 if (!dev->rtnl_link_ops || 5734 dev->rtnl_link_state == RTNL_LINK_INITIALIZED) 5735 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); 5736 5737 /* Notifier chain MUST detach us all upper devices. */ 5738 WARN_ON(netdev_has_any_upper_dev(dev)); 5739 5740 /* Remove entries from kobject tree */ 5741 netdev_unregister_kobject(dev); 5742 #ifdef CONFIG_XPS 5743 /* Remove XPS queueing entries */ 5744 netif_reset_xps_queues_gt(dev, 0); 5745 #endif 5746 } 5747 5748 synchronize_net(); 5749 5750 list_for_each_entry(dev, head, unreg_list) 5751 dev_put(dev); 5752 } 5753 5754 static void rollback_registered(struct net_device *dev) 5755 { 5756 LIST_HEAD(single); 5757 5758 list_add(&dev->unreg_list, &single); 5759 rollback_registered_many(&single); 5760 list_del(&single); 5761 } 5762 5763 static netdev_features_t netdev_fix_features(struct net_device *dev, 5764 netdev_features_t features) 5765 { 5766 /* Fix illegal checksum combinations */ 5767 if ((features & NETIF_F_HW_CSUM) && 5768 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5769 netdev_warn(dev, "mixed HW and IP checksum settings.\n"); 5770 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5771 } 5772 5773 /* TSO requires that SG is present as well. */ 5774 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5775 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 5776 features &= ~NETIF_F_ALL_TSO; 5777 } 5778 5779 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && 5780 !(features & NETIF_F_IP_CSUM)) { 5781 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); 5782 features &= ~NETIF_F_TSO; 5783 features &= ~NETIF_F_TSO_ECN; 5784 } 5785 5786 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && 5787 !(features & NETIF_F_IPV6_CSUM)) { 5788 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); 5789 features &= ~NETIF_F_TSO6; 5790 } 5791 5792 /* TSO ECN requires that TSO is present as well. */ 5793 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 5794 features &= ~NETIF_F_TSO_ECN; 5795 5796 /* Software GSO depends on SG. 
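 *
 * Worked example (illustrative): a driver asking for
 * NETIF_F_TSO | NETIF_F_GSO without NETIF_F_SG ends up with neither bit
 * set, because the TSO checks above and the GSO check just below all drop
 * features that depend on scatter/gather:
 *
 *	features = netdev_fix_features(dev, NETIF_F_TSO | NETIF_F_GSO);
 *	(both NETIF_F_TSO and NETIF_F_GSO are now cleared in features)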
*/ 5797 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5798 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5799 features &= ~NETIF_F_GSO; 5800 } 5801 5802 /* UFO needs SG and checksumming */ 5803 if (features & NETIF_F_UFO) { 5804 /* maybe split UFO into V4 and V6? */ 5805 if (!((features & NETIF_F_GEN_CSUM) || 5806 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5807 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5808 netdev_dbg(dev, 5809 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5810 features &= ~NETIF_F_UFO; 5811 } 5812 5813 if (!(features & NETIF_F_SG)) { 5814 netdev_dbg(dev, 5815 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5816 features &= ~NETIF_F_UFO; 5817 } 5818 } 5819 5820 #ifdef CONFIG_NET_RX_BUSY_POLL 5821 if (dev->netdev_ops->ndo_busy_poll) 5822 features |= NETIF_F_BUSY_POLL; 5823 else 5824 #endif 5825 features &= ~NETIF_F_BUSY_POLL; 5826 5827 return features; 5828 } 5829 5830 int __netdev_update_features(struct net_device *dev) 5831 { 5832 netdev_features_t features; 5833 int err = 0; 5834 5835 ASSERT_RTNL(); 5836 5837 features = netdev_get_wanted_features(dev); 5838 5839 if (dev->netdev_ops->ndo_fix_features) 5840 features = dev->netdev_ops->ndo_fix_features(dev, features); 5841 5842 /* driver might be less strict about feature dependencies */ 5843 features = netdev_fix_features(dev, features); 5844 5845 if (dev->features == features) 5846 return 0; 5847 5848 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", 5849 &dev->features, &features); 5850 5851 if (dev->netdev_ops->ndo_set_features) 5852 err = dev->netdev_ops->ndo_set_features(dev, features); 5853 5854 if (unlikely(err < 0)) { 5855 netdev_err(dev, 5856 "set_features() failed (%d); wanted %pNF, left %pNF\n", 5857 err, &features, &dev->features); 5858 return -1; 5859 } 5860 5861 if (!err) 5862 dev->features = features; 5863 5864 return 1; 5865 } 5866 5867 /** 5868 * netdev_update_features - recalculate device features 5869 * @dev: the device to check 5870 * 5871 * Recalculate dev->features set and send notifications if it 5872 * has changed. Should be called after driver or hardware dependent 5873 * conditions might have changed that influence the features. 5874 */ 5875 void netdev_update_features(struct net_device *dev) 5876 { 5877 if (__netdev_update_features(dev)) 5878 netdev_features_change(dev); 5879 } 5880 EXPORT_SYMBOL(netdev_update_features); 5881 5882 /** 5883 * netdev_change_features - recalculate device features 5884 * @dev: the device to check 5885 * 5886 * Recalculate dev->features set and send notifications even 5887 * if they have not changed. Should be called instead of 5888 * netdev_update_features() if also dev->vlan_features might 5889 * have changed to allow the changes to be propagated to stacked 5890 * VLAN devices. 5891 */ 5892 void netdev_change_features(struct net_device *dev) 5893 { 5894 __netdev_update_features(dev); 5895 netdev_features_change(dev); 5896 } 5897 EXPORT_SYMBOL(netdev_change_features); 5898 5899 /** 5900 * netif_stacked_transfer_operstate - transfer operstate 5901 * @rootdev: the root or lower level device to transfer state from 5902 * @dev: the device to transfer operstate to 5903 * 5904 * Transfer operational state from root to device. This is normally 5905 * called when a stacking relationship exists between the root 5906 * device and the device(a leaf device). 
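 *
 * Illustrative usage from a stacked driver's point of view (real_dev and
 * vlan_dev are placeholders), typically run when the lower device changes
 * state:
 *
 *	netif_stacked_transfer_operstate(real_dev, vlan_dev);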
5907 */ 5908 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 5909 struct net_device *dev) 5910 { 5911 if (rootdev->operstate == IF_OPER_DORMANT) 5912 netif_dormant_on(dev); 5913 else 5914 netif_dormant_off(dev); 5915 5916 if (netif_carrier_ok(rootdev)) { 5917 if (!netif_carrier_ok(dev)) 5918 netif_carrier_on(dev); 5919 } else { 5920 if (netif_carrier_ok(dev)) 5921 netif_carrier_off(dev); 5922 } 5923 } 5924 EXPORT_SYMBOL(netif_stacked_transfer_operstate); 5925 5926 #ifdef CONFIG_SYSFS 5927 static int netif_alloc_rx_queues(struct net_device *dev) 5928 { 5929 unsigned int i, count = dev->num_rx_queues; 5930 struct netdev_rx_queue *rx; 5931 5932 BUG_ON(count < 1); 5933 5934 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5935 if (!rx) 5936 return -ENOMEM; 5937 5938 dev->_rx = rx; 5939 5940 for (i = 0; i < count; i++) 5941 rx[i].dev = dev; 5942 return 0; 5943 } 5944 #endif 5945 5946 static void netdev_init_one_queue(struct net_device *dev, 5947 struct netdev_queue *queue, void *_unused) 5948 { 5949 /* Initialize queue lock */ 5950 spin_lock_init(&queue->_xmit_lock); 5951 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); 5952 queue->xmit_lock_owner = -1; 5953 netdev_queue_numa_node_write(queue, NUMA_NO_NODE); 5954 queue->dev = dev; 5955 #ifdef CONFIG_BQL 5956 dql_init(&queue->dql, HZ); 5957 #endif 5958 } 5959 5960 static void netif_free_tx_queues(struct net_device *dev) 5961 { 5962 kvfree(dev->_tx); 5963 } 5964 5965 static int netif_alloc_netdev_queues(struct net_device *dev) 5966 { 5967 unsigned int count = dev->num_tx_queues; 5968 struct netdev_queue *tx; 5969 size_t sz = count * sizeof(*tx); 5970 5971 BUG_ON(count < 1 || count > 0xffff); 5972 5973 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 5974 if (!tx) { 5975 tx = vzalloc(sz); 5976 if (!tx) 5977 return -ENOMEM; 5978 } 5979 dev->_tx = tx; 5980 5981 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5982 spin_lock_init(&dev->tx_global_lock); 5983 5984 return 0; 5985 } 5986 5987 /** 5988 * register_netdevice - register a network device 5989 * @dev: device to register 5990 * 5991 * Take a completed network device structure and add it to the kernel 5992 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier 5993 * chain. 0 is returned on success. A negative errno code is returned 5994 * on a failure to set up the device, or if the name is a duplicate. 5995 * 5996 * Callers must hold the rtnl semaphore. You may want 5997 * register_netdev() instead of this. 5998 * 5999 * BUGS: 6000 * The locking appears insufficient to guarantee two parallel registers 6001 * will not get the same name. 6002 */ 6003 6004 int register_netdevice(struct net_device *dev) 6005 { 6006 int ret; 6007 struct net *net = dev_net(dev); 6008 6009 BUG_ON(dev_boot_phase); 6010 ASSERT_RTNL(); 6011 6012 might_sleep(); 6013 6014 /* When net_device's are persistent, this will be fatal. 
*/ 6015 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 6016 BUG_ON(!net); 6017 6018 spin_lock_init(&dev->addr_list_lock); 6019 netdev_set_addr_lockdep_class(dev); 6020 6021 dev->iflink = -1; 6022 6023 ret = dev_get_valid_name(net, dev, dev->name); 6024 if (ret < 0) 6025 goto out; 6026 6027 /* Init, if this function is available */ 6028 if (dev->netdev_ops->ndo_init) { 6029 ret = dev->netdev_ops->ndo_init(dev); 6030 if (ret) { 6031 if (ret > 0) 6032 ret = -EIO; 6033 goto out; 6034 } 6035 } 6036 6037 if (((dev->hw_features | dev->features) & 6038 NETIF_F_HW_VLAN_CTAG_FILTER) && 6039 (!dev->netdev_ops->ndo_vlan_rx_add_vid || 6040 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) { 6041 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n"); 6042 ret = -EINVAL; 6043 goto err_uninit; 6044 } 6045 6046 ret = -EBUSY; 6047 if (!dev->ifindex) 6048 dev->ifindex = dev_new_index(net); 6049 else if (__dev_get_by_index(net, dev->ifindex)) 6050 goto err_uninit; 6051 6052 if (dev->iflink == -1) 6053 dev->iflink = dev->ifindex; 6054 6055 /* Transfer changeable features to wanted_features and enable 6056 * software offloads (GSO and GRO). 6057 */ 6058 dev->hw_features |= NETIF_F_SOFT_FEATURES; 6059 dev->features |= NETIF_F_SOFT_FEATURES; 6060 dev->wanted_features = dev->features & dev->hw_features; 6061 6062 if (!(dev->flags & IFF_LOOPBACK)) { 6063 dev->hw_features |= NETIF_F_NOCACHE_COPY; 6064 } 6065 6066 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. 6067 */ 6068 dev->vlan_features |= NETIF_F_HIGHDMA; 6069 6070 /* Make NETIF_F_SG inheritable to tunnel devices. 6071 */ 6072 dev->hw_enc_features |= NETIF_F_SG; 6073 6074 /* Make NETIF_F_SG inheritable to MPLS. 6075 */ 6076 dev->mpls_features |= NETIF_F_SG; 6077 6078 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 6079 ret = notifier_to_errno(ret); 6080 if (ret) 6081 goto err_uninit; 6082 6083 ret = netdev_register_kobject(dev); 6084 if (ret) 6085 goto err_uninit; 6086 dev->reg_state = NETREG_REGISTERED; 6087 6088 __netdev_update_features(dev); 6089 6090 /* 6091 * Default initial state at registry is that the 6092 * device is present. 6093 */ 6094 6095 set_bit(__LINK_STATE_PRESENT, &dev->state); 6096 6097 linkwatch_init_dev(dev); 6098 6099 dev_init_scheduler(dev); 6100 dev_hold(dev); 6101 list_netdevice(dev); 6102 add_device_randomness(dev->dev_addr, dev->addr_len); 6103 6104 /* If the device has permanent device address, driver should 6105 * set dev_addr and also addr_assign_type should be set to 6106 * NET_ADDR_PERM (default value). 6107 */ 6108 if (dev->addr_assign_type == NET_ADDR_PERM) 6109 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 6110 6111 /* Notify protocols, that a new device appeared. */ 6112 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 6113 ret = notifier_to_errno(ret); 6114 if (ret) { 6115 rollback_registered(dev); 6116 dev->reg_state = NETREG_UNREGISTERED; 6117 } 6118 /* 6119 * Prevent userspace races by waiting until the network 6120 * device is fully setup before sending notifications. 
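 *
 * For completeness, an illustrative driver-side flow built on top of this
 * function (all names generic; error paths shortened):
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	err = register_netdev(dev);	(takes rtnl and calls this function)
 *	if (err)
 *		free_netdev(dev);
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);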
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	number of fields so it can be used to schedule NAPI polls without
 *	registering a full-blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code, and this dummy netdev is supposed to be
	 * used only for NAPI polls.
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note: We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);


/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl
 *	semaphore and expands the device name if you passed a format string
 *	to alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 *	netdev_wait_allrefs - wait until all references are gone.
 *	@dev: target net_device
 *
 *	This is called when unregistering network devices.
 *
 *	Any protocol or device that holds a reference should register
 *	for netdevice notification, and cleanup and put back the
 *	reference if they receive an UNREGISTER event.
 *	We can get stuck here if buggy protocols don't correctly
 *	call dev_put.
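 *
 *	As a hedged illustration only (my_proto_dev and my_proto_netdev_event
 *	are made-up names, not kernel symbols), such a reference holder
 *	typically looks roughly like this:
 *
 *		static int my_proto_netdev_event(struct notifier_block *nb,
 *						 unsigned long event, void *ptr)
 *		{
 *			struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *			if (event == NETDEV_UNREGISTER && dev == my_proto_dev) {
 *				my_proto_dev = NULL;
 *				dev_put(dev);
 *			}
 *			return NOTIFY_DONE;
 *		}
 *
 *	where the dev_put() drops the reference taken earlier with dev_hold().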
6222 */ 6223 static void netdev_wait_allrefs(struct net_device *dev) 6224 { 6225 unsigned long rebroadcast_time, warning_time; 6226 int refcnt; 6227 6228 linkwatch_forget_dev(dev); 6229 6230 rebroadcast_time = warning_time = jiffies; 6231 refcnt = netdev_refcnt_read(dev); 6232 6233 while (refcnt != 0) { 6234 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { 6235 rtnl_lock(); 6236 6237 /* Rebroadcast unregister notification */ 6238 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6239 6240 __rtnl_unlock(); 6241 rcu_barrier(); 6242 rtnl_lock(); 6243 6244 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 6245 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 6246 &dev->state)) { 6247 /* We must not have linkwatch events 6248 * pending on unregister. If this 6249 * happens, we simply run the queue 6250 * unscheduled, resulting in a noop 6251 * for this device. 6252 */ 6253 linkwatch_run_queue(); 6254 } 6255 6256 __rtnl_unlock(); 6257 6258 rebroadcast_time = jiffies; 6259 } 6260 6261 msleep(250); 6262 6263 refcnt = netdev_refcnt_read(dev); 6264 6265 if (time_after(jiffies, warning_time + 10 * HZ)) { 6266 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 6267 dev->name, refcnt); 6268 warning_time = jiffies; 6269 } 6270 } 6271 } 6272 6273 /* The sequence is: 6274 * 6275 * rtnl_lock(); 6276 * ... 6277 * register_netdevice(x1); 6278 * register_netdevice(x2); 6279 * ... 6280 * unregister_netdevice(y1); 6281 * unregister_netdevice(y2); 6282 * ... 6283 * rtnl_unlock(); 6284 * free_netdev(y1); 6285 * free_netdev(y2); 6286 * 6287 * We are invoked by rtnl_unlock(). 6288 * This allows us to deal with problems: 6289 * 1) We can delete sysfs objects which invoke hotplug 6290 * without deadlocking with linkwatch via keventd. 6291 * 2) Since we run with the RTNL semaphore not held, we can sleep 6292 * safely in order to wait for the netdev refcnt to drop to zero. 6293 * 6294 * We must not return until all unregister events added during 6295 * the interval the lock was held have been completed. 6296 */ 6297 void netdev_run_todo(void) 6298 { 6299 struct list_head list; 6300 6301 /* Snapshot list, allow later requests */ 6302 list_replace_init(&net_todo_list, &list); 6303 6304 __rtnl_unlock(); 6305 6306 6307 /* Wait for rcu callbacks to finish before next phase */ 6308 if (!list_empty(&list)) 6309 rcu_barrier(); 6310 6311 while (!list_empty(&list)) { 6312 struct net_device *dev 6313 = list_first_entry(&list, struct net_device, todo_list); 6314 list_del(&dev->todo_list); 6315 6316 rtnl_lock(); 6317 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 6318 __rtnl_unlock(); 6319 6320 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 6321 pr_err("network todo '%s' but state %d\n", 6322 dev->name, dev->reg_state); 6323 dump_stack(); 6324 continue; 6325 } 6326 6327 dev->reg_state = NETREG_UNREGISTERED; 6328 6329 on_each_cpu(flush_backlog, dev, 1); 6330 6331 netdev_wait_allrefs(dev); 6332 6333 /* paranoia */ 6334 BUG_ON(netdev_refcnt_read(dev)); 6335 WARN_ON(rcu_access_pointer(dev->ip_ptr)); 6336 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 6337 WARN_ON(dev->dn_ptr); 6338 6339 if (dev->destructor) 6340 dev->destructor(dev); 6341 6342 /* Report a network device has been unregistered */ 6343 rtnl_lock(); 6344 dev_net(dev)->dev_unreg_count--; 6345 __rtnl_unlock(); 6346 wake_up(&netdev_unregistering_wq); 6347 6348 /* Free network device */ 6349 kobject_put(&dev->dev.kobj); 6350 } 6351 } 6352 6353 /* Convert net_device_stats to rtnl_link_stats64. 
They have the same 6354 * fields in the same order, with only the type differing. 6355 */ 6356 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 6357 const struct net_device_stats *netdev_stats) 6358 { 6359 #if BITS_PER_LONG == 64 6360 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); 6361 memcpy(stats64, netdev_stats, sizeof(*stats64)); 6362 #else 6363 size_t i, n = sizeof(*stats64) / sizeof(u64); 6364 const unsigned long *src = (const unsigned long *)netdev_stats; 6365 u64 *dst = (u64 *)stats64; 6366 6367 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != 6368 sizeof(*stats64) / sizeof(u64)); 6369 for (i = 0; i < n; i++) 6370 dst[i] = src[i]; 6371 #endif 6372 } 6373 EXPORT_SYMBOL(netdev_stats_to_stats64); 6374 6375 /** 6376 * dev_get_stats - get network device statistics 6377 * @dev: device to get statistics from 6378 * @storage: place to store stats 6379 * 6380 * Get network statistics from device. Return @storage. 6381 * The device driver may provide its own method by setting 6382 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; 6383 * otherwise the internal statistics structure is used. 6384 */ 6385 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 6386 struct rtnl_link_stats64 *storage) 6387 { 6388 const struct net_device_ops *ops = dev->netdev_ops; 6389 6390 if (ops->ndo_get_stats64) { 6391 memset(storage, 0, sizeof(*storage)); 6392 ops->ndo_get_stats64(dev, storage); 6393 } else if (ops->ndo_get_stats) { 6394 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); 6395 } else { 6396 netdev_stats_to_stats64(storage, &dev->stats); 6397 } 6398 storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 6399 storage->tx_dropped += atomic_long_read(&dev->tx_dropped); 6400 return storage; 6401 } 6402 EXPORT_SYMBOL(dev_get_stats); 6403 6404 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) 6405 { 6406 struct netdev_queue *queue = dev_ingress_queue(dev); 6407 6408 #ifdef CONFIG_NET_CLS_ACT 6409 if (queue) 6410 return queue; 6411 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 6412 if (!queue) 6413 return NULL; 6414 netdev_init_one_queue(dev, queue, NULL); 6415 queue->qdisc = &noop_qdisc; 6416 queue->qdisc_sleeping = &noop_qdisc; 6417 rcu_assign_pointer(dev->ingress_queue, queue); 6418 #endif 6419 return queue; 6420 } 6421 6422 static const struct ethtool_ops default_ethtool_ops; 6423 6424 void netdev_set_default_ethtool_ops(struct net_device *dev, 6425 const struct ethtool_ops *ops) 6426 { 6427 if (dev->ethtool_ops == &default_ethtool_ops) 6428 dev->ethtool_ops = ops; 6429 } 6430 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); 6431 6432 void netdev_freemem(struct net_device *dev) 6433 { 6434 char *addr = (char *)dev - dev->padded; 6435 6436 kvfree(addr); 6437 } 6438 6439 /** 6440 * alloc_netdev_mqs - allocate network device 6441 * @sizeof_priv: size of private data to allocate space for 6442 * @name: device name format string 6443 * @setup: callback to initialize device 6444 * @txqs: the number of TX subqueues to allocate 6445 * @rxqs: the number of RX subqueues to allocate 6446 * 6447 * Allocates a struct net_device with private data area for driver use 6448 * and performs basic initialization. Also allocates subqueue structs 6449 * for each queue on the device. 
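 *
 *	A minimal usage sketch, not taken from an existing driver (struct
 *	my_priv is a made-up private structure and the queue counts are
 *	arbitrary):
 *
 *		dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
 *				       ether_setup, 4, 4);
 *		if (!dev)
 *			return -ENOMEM;
 *
 *	The caller later pairs this with free_netdev(), either directly on an
 *	error path or after unregister_netdev() once the device was registered.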
6450 */ 6451 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 6452 void (*setup)(struct net_device *), 6453 unsigned int txqs, unsigned int rxqs) 6454 { 6455 struct net_device *dev; 6456 size_t alloc_size; 6457 struct net_device *p; 6458 6459 BUG_ON(strlen(name) >= sizeof(dev->name)); 6460 6461 if (txqs < 1) { 6462 pr_err("alloc_netdev: Unable to allocate device with zero queues\n"); 6463 return NULL; 6464 } 6465 6466 #ifdef CONFIG_SYSFS 6467 if (rxqs < 1) { 6468 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); 6469 return NULL; 6470 } 6471 #endif 6472 6473 alloc_size = sizeof(struct net_device); 6474 if (sizeof_priv) { 6475 /* ensure 32-byte alignment of private area */ 6476 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); 6477 alloc_size += sizeof_priv; 6478 } 6479 /* ensure 32-byte alignment of whole construct */ 6480 alloc_size += NETDEV_ALIGN - 1; 6481 6482 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 6483 if (!p) 6484 p = vzalloc(alloc_size); 6485 if (!p) 6486 return NULL; 6487 6488 dev = PTR_ALIGN(p, NETDEV_ALIGN); 6489 dev->padded = (char *)dev - (char *)p; 6490 6491 dev->pcpu_refcnt = alloc_percpu(int); 6492 if (!dev->pcpu_refcnt) 6493 goto free_dev; 6494 6495 if (dev_addr_init(dev)) 6496 goto free_pcpu; 6497 6498 dev_mc_init(dev); 6499 dev_uc_init(dev); 6500 6501 dev_net_set(dev, &init_net); 6502 6503 dev->gso_max_size = GSO_MAX_SIZE; 6504 dev->gso_max_segs = GSO_MAX_SEGS; 6505 6506 INIT_LIST_HEAD(&dev->napi_list); 6507 INIT_LIST_HEAD(&dev->unreg_list); 6508 INIT_LIST_HEAD(&dev->close_list); 6509 INIT_LIST_HEAD(&dev->link_watch_list); 6510 INIT_LIST_HEAD(&dev->adj_list.upper); 6511 INIT_LIST_HEAD(&dev->adj_list.lower); 6512 INIT_LIST_HEAD(&dev->all_adj_list.upper); 6513 INIT_LIST_HEAD(&dev->all_adj_list.lower); 6514 dev->priv_flags = IFF_XMIT_DST_RELEASE; 6515 setup(dev); 6516 6517 dev->num_tx_queues = txqs; 6518 dev->real_num_tx_queues = txqs; 6519 if (netif_alloc_netdev_queues(dev)) 6520 goto free_all; 6521 6522 #ifdef CONFIG_SYSFS 6523 dev->num_rx_queues = rxqs; 6524 dev->real_num_rx_queues = rxqs; 6525 if (netif_alloc_rx_queues(dev)) 6526 goto free_all; 6527 #endif 6528 6529 strcpy(dev->name, name); 6530 dev->group = INIT_NETDEV_GROUP; 6531 if (!dev->ethtool_ops) 6532 dev->ethtool_ops = &default_ethtool_ops; 6533 return dev; 6534 6535 free_all: 6536 free_netdev(dev); 6537 return NULL; 6538 6539 free_pcpu: 6540 free_percpu(dev->pcpu_refcnt); 6541 free_dev: 6542 netdev_freemem(dev); 6543 return NULL; 6544 } 6545 EXPORT_SYMBOL(alloc_netdev_mqs); 6546 6547 /** 6548 * free_netdev - free network device 6549 * @dev: device 6550 * 6551 * This function does the last stage of destroying an allocated device 6552 * interface. The reference to the device object is released. 6553 * If this is the last reference then it will be freed. 
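 *
 *	A common unwind pattern, sketched with a made-up private structure
 *	(struct my_priv is illustrative, not a kernel symbol):
 *
 *		dev = alloc_etherdev(sizeof(struct my_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *		err = register_netdev(dev);
 *		if (err) {
 *			free_netdev(dev);
 *			return err;
 *		}
 *
 *	Never call free_netdev() on a device that is still registered;
 *	unregister it first.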
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If @head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *	Note: As most callers use a stack allocated list_head,
 *	we force a list_del() to make sure the stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
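/*
 * Illustrative sketch only (dev1 and dev2 are made-up variables): batching
 * several unregistrations under a single RTNL hold with the two helpers
 * above.  The actual teardown work runs from the todo list when
 * rtnl_unlock() is called, and each device must still be handed to
 * free_netdev() afterwards.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */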
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL, name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
6783 */ 6784 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL); 6785 6786 synchronize_net(); 6787 err = 0; 6788 out: 6789 return err; 6790 } 6791 EXPORT_SYMBOL_GPL(dev_change_net_namespace); 6792 6793 static int dev_cpu_callback(struct notifier_block *nfb, 6794 unsigned long action, 6795 void *ocpu) 6796 { 6797 struct sk_buff **list_skb; 6798 struct sk_buff *skb; 6799 unsigned int cpu, oldcpu = (unsigned long)ocpu; 6800 struct softnet_data *sd, *oldsd; 6801 6802 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 6803 return NOTIFY_OK; 6804 6805 local_irq_disable(); 6806 cpu = smp_processor_id(); 6807 sd = &per_cpu(softnet_data, cpu); 6808 oldsd = &per_cpu(softnet_data, oldcpu); 6809 6810 /* Find end of our completion_queue. */ 6811 list_skb = &sd->completion_queue; 6812 while (*list_skb) 6813 list_skb = &(*list_skb)->next; 6814 /* Append completion queue from offline CPU. */ 6815 *list_skb = oldsd->completion_queue; 6816 oldsd->completion_queue = NULL; 6817 6818 /* Append output queue from offline CPU. */ 6819 if (oldsd->output_queue) { 6820 *sd->output_queue_tailp = oldsd->output_queue; 6821 sd->output_queue_tailp = oldsd->output_queue_tailp; 6822 oldsd->output_queue = NULL; 6823 oldsd->output_queue_tailp = &oldsd->output_queue; 6824 } 6825 /* Append NAPI poll list from offline CPU. */ 6826 if (!list_empty(&oldsd->poll_list)) { 6827 list_splice_init(&oldsd->poll_list, &sd->poll_list); 6828 raise_softirq_irqoff(NET_RX_SOFTIRQ); 6829 } 6830 6831 raise_softirq_irqoff(NET_TX_SOFTIRQ); 6832 local_irq_enable(); 6833 6834 /* Process offline CPU's input_pkt_queue */ 6835 while ((skb = __skb_dequeue(&oldsd->process_queue))) { 6836 netif_rx_internal(skb); 6837 input_queue_head_incr(oldsd); 6838 } 6839 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 6840 netif_rx_internal(skb); 6841 input_queue_head_incr(oldsd); 6842 } 6843 6844 return NOTIFY_OK; 6845 } 6846 6847 6848 /** 6849 * netdev_increment_features - increment feature set by one 6850 * @all: current feature set 6851 * @one: new feature set 6852 * @mask: mask feature set 6853 * 6854 * Computes a new feature set after adding a device with feature set 6855 * @one to the master device with current feature set @all. Will not 6856 * enable anything that is off in @mask. Returns the new feature set. 6857 */ 6858 netdev_features_t netdev_increment_features(netdev_features_t all, 6859 netdev_features_t one, netdev_features_t mask) 6860 { 6861 if (mask & NETIF_F_GEN_CSUM) 6862 mask |= NETIF_F_ALL_CSUM; 6863 mask |= NETIF_F_VLAN_CHALLENGED; 6864 6865 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask; 6866 all &= one | ~NETIF_F_ALL_FOR_ALL; 6867 6868 /* If one device supports hw checksumming, set for all. 
*/ 6869 if (all & NETIF_F_GEN_CSUM) 6870 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); 6871 6872 return all; 6873 } 6874 EXPORT_SYMBOL(netdev_increment_features); 6875 6876 static struct hlist_head * __net_init netdev_create_hash(void) 6877 { 6878 int i; 6879 struct hlist_head *hash; 6880 6881 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); 6882 if (hash != NULL) 6883 for (i = 0; i < NETDEV_HASHENTRIES; i++) 6884 INIT_HLIST_HEAD(&hash[i]); 6885 6886 return hash; 6887 } 6888 6889 /* Initialize per network namespace state */ 6890 static int __net_init netdev_init(struct net *net) 6891 { 6892 if (net != &init_net) 6893 INIT_LIST_HEAD(&net->dev_base_head); 6894 6895 net->dev_name_head = netdev_create_hash(); 6896 if (net->dev_name_head == NULL) 6897 goto err_name; 6898 6899 net->dev_index_head = netdev_create_hash(); 6900 if (net->dev_index_head == NULL) 6901 goto err_idx; 6902 6903 return 0; 6904 6905 err_idx: 6906 kfree(net->dev_name_head); 6907 err_name: 6908 return -ENOMEM; 6909 } 6910 6911 /** 6912 * netdev_drivername - network driver for the device 6913 * @dev: network device 6914 * 6915 * Determine network driver for device. 6916 */ 6917 const char *netdev_drivername(const struct net_device *dev) 6918 { 6919 const struct device_driver *driver; 6920 const struct device *parent; 6921 const char *empty = ""; 6922 6923 parent = dev->dev.parent; 6924 if (!parent) 6925 return empty; 6926 6927 driver = parent->driver; 6928 if (driver && driver->name) 6929 return driver->name; 6930 return empty; 6931 } 6932 6933 static int __netdev_printk(const char *level, const struct net_device *dev, 6934 struct va_format *vaf) 6935 { 6936 int r; 6937 6938 if (dev && dev->dev.parent) { 6939 r = dev_printk_emit(level[1] - '0', 6940 dev->dev.parent, 6941 "%s %s %s: %pV", 6942 dev_driver_string(dev->dev.parent), 6943 dev_name(dev->dev.parent), 6944 netdev_name(dev), vaf); 6945 } else if (dev) { 6946 r = printk("%s%s: %pV", level, netdev_name(dev), vaf); 6947 } else { 6948 r = printk("%s(NULL net_device): %pV", level, vaf); 6949 } 6950 6951 return r; 6952 } 6953 6954 int netdev_printk(const char *level, const struct net_device *dev, 6955 const char *format, ...) 6956 { 6957 struct va_format vaf; 6958 va_list args; 6959 int r; 6960 6961 va_start(args, format); 6962 6963 vaf.fmt = format; 6964 vaf.va = &args; 6965 6966 r = __netdev_printk(level, dev, &vaf); 6967 6968 va_end(args); 6969 6970 return r; 6971 } 6972 EXPORT_SYMBOL(netdev_printk); 6973 6974 #define define_netdev_printk_level(func, level) \ 6975 int func(const struct net_device *dev, const char *fmt, ...) 
								\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&netdev_unregistering_wq, &wait,
				TASK_UNINTERRUPTIBLE);
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();
		schedule();
	}
	finish_wait(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
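/*
 * Illustrative use of the netdev_<level>() helpers generated by
 * define_netdev_printk_level() above.  Hypothetical driver code; "i",
 * "err" and "speed" are made-up locals, not symbols from this file:
 *
 *	netdev_err(dev, "failed to start TX queue %d: %d\n", i, err);
 *	netdev_info(dev, "link up, %u Mb/s\n", speed);
 */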